"""This file contains utilities for initializing neural network parameters."""

import math
import warnings
from typing import Callable, Literal, Optional as _Optional, TypeVar, Union

from typing_extensions import ParamSpec

import torch
from torch import Tensor


__all__ = [
    "calculate_gain",
    "uniform_",
    "normal_",
    "trunc_normal_",
    "constant_",
    "ones_",
    "zeros_",
    "eye_",
    "dirac_",
    "xavier_uniform_",
    "xavier_normal_",
    "kaiming_uniform_",
    "kaiming_normal_",
    "orthogonal_",
    "sparse_",
    "uniform",
    "normal",
    "constant",
    "eye",
    "dirac",
    "xavier_uniform",
    "xavier_normal",
    "kaiming_uniform",
    "kaiming_normal",
    "orthogonal",
    "sparse",
]

_R = TypeVar("_R")
_P = ParamSpec("_P")

_NonlinearityType = Literal[
    "linear",
    "conv1d",
    "conv2d",
    "conv3d",
    "conv_transpose1d",
    "conv_transpose2d",
    "conv_transpose3d",
    "sigmoid",
    "tanh",
    "relu",
    "leaky_relu",
    "selu",
]
_FanMode = Literal["fan_in", "fan_out"]


def _no_grad_uniform_(
    tensor: Tensor,
    a: float,
    b: float,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    with torch.no_grad():
        return tensor.uniform_(a, b, generator=generator)


def _no_grad_normal_(
    tensor: Tensor,
    mean: float,
    std: float,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    with torch.no_grad():
        return tensor.normal_(mean, std, generator=generator)


def _no_grad_trunc_normal_(
    tensor: Tensor,
    mean: float,
    std: float,
    a: float,
    b: float,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x: float) -> float:
        # Computes standard normal cumulative distribution function
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
            "The distribution of values may be incorrect.",
            stacklevel=2,
        )

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1, generator=generator)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.0))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor


def _no_grad_fill_(tensor: Tensor, val: float) -> Tensor:
    with torch.no_grad():
        return tensor.fill_(val)


def _no_grad_zero_(tensor: Tensor) -> Tensor:
    with torch.no_grad():
        return tensor.zero_()

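# Illustrative check (a sketch, not from the original source): the inverse-CDF
# construction in `_no_grad_trunc_normal_` above guarantees every sample lands
# inside [a, b] while following the shape of N(mean, std^2) on that interval.
# The tensor size below is an arbitrary choice for illustration.
#
#   >>> t = torch.empty(10_000)
#   >>> _no_grad_trunc_normal_(t, mean=0.0, std=1.0, a=-2.0, b=2.0)
#   >>> assert t.min() >= -2.0 and t.max() <= 2.0
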
def calculate_gain(
    nonlinearity: _NonlinearityType, param: _Optional[Union[int, float]] = None
) -> float:
    r"""Return the recommended gain value for the given nonlinearity function.

    The values are as follows:

    ================= ====================================================
    nonlinearity      gain
    ================= ====================================================
    Linear / Identity :math:`1`
    Conv{1,2,3}D      :math:`1`
    Sigmoid           :math:`1`
    Tanh              :math:`\frac{5}{3}`
    ReLU              :math:`\sqrt{2}`
    Leaky Relu        :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}`
    SELU              :math:`\frac{3}{4}`
    ================= ====================================================

    .. warning::
        In order to implement `Self-Normalizing Neural Networks`_ ,
        you should use ``nonlinearity='linear'`` instead of ``nonlinearity='selu'``.
        This gives the initial weights a variance of ``1 / N``,
        which is necessary to induce a stable fixed point in the forward pass.
        In contrast, the default gain for ``SELU`` sacrifices the normalization
        effect for more stable gradient flow in rectangular layers.

    Args:
        nonlinearity: the non-linear function (`nn.functional` name)
        param: optional parameter for the non-linear function

    Examples:
        >>> gain = nn.init.calculate_gain(
        ...     "leaky_relu", 0.2
        ... )  # leaky_relu with negative_slope=0.2

    .. _Self-Normalizing Neural Networks: https://papers.nips.cc/paper/2017/hash/5d44ee6f2c3f71b73125876103c8f6c4-Abstract.html
    """
    linear_fns = [
        "linear",
        "conv1d",
        "conv2d",
        "conv3d",
        "conv_transpose1d",
        "conv_transpose2d",
        "conv_transpose3d",
    ]
    if nonlinearity in linear_fns or nonlinearity == "sigmoid":
        return 1
    elif nonlinearity == "tanh":
        return 5.0 / 3
    elif nonlinearity == "relu":
        return math.sqrt(2.0)
    elif nonlinearity == "leaky_relu":
        if param is None:
            negative_slope = 0.01
        elif (
            not isinstance(param, bool)
            and isinstance(param, int)
            or isinstance(param, float)
        ):
            # True/False are instances of int, hence check above
            negative_slope = param
        else:
            raise ValueError(f"negative_slope {param} not a valid number")
        return math.sqrt(2.0 / (1 + negative_slope**2))
    elif nonlinearity == "selu":
        return 3.0 / 4  # Value found empirically (https://github.com/pytorch/pytorch/pull/50664)
    else:
        raise ValueError(f"Unsupported nonlinearity {nonlinearity}")

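# Worked values (a sketch under the definitions above): the docstring table can
# be reproduced directly from the function.
#
#   >>> calculate_gain("tanh")        # 5/3 ~= 1.6667
#   >>> calculate_gain("relu")        # sqrt(2) ~= 1.4142
#   >>> calculate_gain("leaky_relu")  # sqrt(2 / (1 + 0.01**2)) ~= 1.4141
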
def uniform_(
    tensor: Tensor,
    a: float = 0.0,
    b: float = 1.0,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input Tensor with values drawn from the uniform distribution.

    :math:`\mathcal{U}(a, b)`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        a: the lower bound of the uniform distribution
        b: the upper bound of the uniform distribution
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.uniform_(w)
    """
    if torch.overrides.has_torch_function_variadic(tensor):
        return torch.overrides.handle_torch_function(
            uniform_, (tensor,), tensor=tensor, a=a, b=b, generator=generator
        )
    return _no_grad_uniform_(tensor, a, b, generator)

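# Usage sketch (illustrative; shapes are arbitrary): the `generator` argument
# makes initialization reproducible without touching the global RNG state.
#
#   >>> g = torch.Generator().manual_seed(0)
#   >>> w1, w2 = torch.empty(3, 5), torch.empty(3, 5)
#   >>> uniform_(w1, generator=g)
#   >>> uniform_(w2, generator=torch.Generator().manual_seed(0))
#   >>> assert torch.equal(w1, w2)
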
def normal_(
    tensor: Tensor,
    mean: float = 0.0,
    std: float = 1.0,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input Tensor with values drawn from the normal distribution.

    :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.normal_(w)
    """
    if torch.overrides.has_torch_function_variadic(tensor):
        return torch.overrides.handle_torch_function(
            normal_, (tensor,), tensor=tensor, mean=mean, std=std, generator=generator
        )
    return _no_grad_normal_(tensor, mean, std, generator)

def trunc_normal_(
    tensor: Tensor,
    mean: float = 0.0,
    std: float = 1.0,
    a: float = -2.0,
    b: float = 2.0,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input Tensor with values drawn from a truncated normal distribution.

    The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=generator)

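# Usage sketch (illustrative): placing `mean` more than 2 std outside [a, b]
# triggers the distribution warning from `_no_grad_trunc_normal_`, and the
# final clamp still keeps every value inside the bounds.
#
#   >>> w = torch.empty(3, 5)
#   >>> trunc_normal_(w, mean=5.0, std=1.0, a=-2.0, b=2.0)  # warns
#   >>> assert w.max() <= 2.0
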
def constant_(tensor: Tensor, val: float) -> Tensor:
    r"""Fill the input Tensor with the value :math:`\text{val}`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        val: the value to fill the tensor with

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.constant_(w, 0.3)
    """
    if torch.overrides.has_torch_function_variadic(tensor):
        return torch.overrides.handle_torch_function(
            constant_, (tensor,), tensor=tensor, val=val
        )
    return _no_grad_fill_(tensor, val)

def ones_(tensor: Tensor) -> Tensor:
    r"""Fill the input Tensor with the scalar value `1`.

    Args:
        tensor: an n-dimensional `torch.Tensor`

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.ones_(w)
    """
    return _no_grad_fill_(tensor, 1.0)

def zeros_(tensor: Tensor) -> Tensor:
    r"""Fill the input Tensor with the scalar value `0`.

    Args:
        tensor: an n-dimensional `torch.Tensor`

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.zeros_(w)
    """
    return _no_grad_zero_(tensor)

def eye_(tensor: Tensor) -> Tensor:
    r"""Fill the 2-dimensional input `Tensor` with the identity matrix.

    Preserves the identity of the inputs in `Linear` layers, where as
    many inputs are preserved as possible.

    Args:
        tensor: a 2-dimensional `torch.Tensor`

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.eye_(w)
    """
    if tensor.ndimension() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")

    with torch.no_grad():
        torch.eye(*tensor.shape, out=tensor, requires_grad=tensor.requires_grad)
    return tensor

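# Usage sketch (illustrative): with `eye_` weights and zero bias, a square
# Linear layer is the identity map, which is the "preserves the identity of
# the inputs" property described above.
#
#   >>> import torch.nn as nn
#   >>> layer = nn.Linear(4, 4)
#   >>> eye_(layer.weight)
#   >>> zeros_(layer.bias)
#   >>> x = torch.randn(2, 4)
#   >>> assert torch.allclose(layer(x), x)
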
def dirac_(tensor: Tensor, groups: int = 1) -> Tensor:
    r"""Fill the {3, 4, 5}-dimensional input `Tensor` with the Dirac delta function.

    Preserves the identity of the inputs in `Convolutional`
    layers, where as many input channels are preserved as possible. In case
    of groups>1, each group of channels preserves identity

    Args:
        tensor: a {3, 4, 5}-dimensional `torch.Tensor`
        groups (int, optional): number of groups in the conv layer (default: 1)
    Examples:
        >>> w = torch.empty(3, 16, 5, 5)
        >>> nn.init.dirac_(w)
        >>> w = torch.empty(3, 24, 5, 5)
        >>> nn.init.dirac_(w, 3)
    """
    dimensions = tensor.ndimension()
    if dimensions not in [3, 4, 5]:
        raise ValueError("Only tensors with 3, 4, or 5 dimensions are supported")

    sizes = tensor.size()

    if sizes[0] % groups != 0:
        raise ValueError("dim 0 must be divisible by groups")

    out_chans_per_grp = sizes[0] // groups
    min_dim = min(out_chans_per_grp, sizes[1])

    with torch.no_grad():
        tensor.zero_()

        for g in range(groups):
            for d in range(min_dim):
                if dimensions == 3:  # Temporal convolution
                    tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2] = 1
                elif dimensions == 4:  # Spatial convolution
                    tensor[
                        g * out_chans_per_grp + d,
                        d,
                        tensor.size(2) // 2,
                        tensor.size(3) // 2,
                    ] = 1
                else:  # Volumetric convolution
                    tensor[
                        g * out_chans_per_grp + d,
                        d,
                        tensor.size(2) // 2,
                        tensor.size(3) // 2,
                        tensor.size(4) // 2,
                    ] = 1
    return tensor

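# Usage sketch (illustrative): `dirac_` weights make a conv layer pass its
# first min(out_channels, in_channels) input channels through unchanged, given
# an odd kernel size and matching padding. The layer below is an arbitrary
# example, not from the original source.
#
#   >>> import torch.nn as nn
#   >>> conv = nn.Conv2d(3, 3, kernel_size=3, padding=1, bias=False)
#   >>> dirac_(conv.weight)
#   >>> x = torch.randn(1, 3, 8, 8)
#   >>> assert torch.allclose(conv(x), x)
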
def _calculate_fan_in_and_fan_out(tensor: Tensor) -> tuple[int, int]:
    dimensions = tensor.dim()
    if dimensions < 2:
        raise ValueError(
            "Fan in and fan out can not be computed for tensor with fewer than 2 dimensions"
        )

    num_input_fmaps = tensor.size(1)
    num_output_fmaps = tensor.size(0)
    receptive_field_size = 1
    if tensor.dim() > 2:
        # math.prod is not always available, accumulate the product manually
        # we could use functools.reduce but that is not supported by TorchScript
        for s in tensor.shape[2:]:
            receptive_field_size *= s
    fan_in = num_input_fmaps * receptive_field_size
    fan_out = num_output_fmaps * receptive_field_size

    return fan_in, fan_out


def xavier_uniform_(
    tensor: Tensor,
    gain: float = 1.0,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input `Tensor` with values using a Xavier uniform distribution.

    The method is described in `Understanding the difficulty of training
    deep feedforward neural networks` - Glorot, X. & Bengio, Y. (2010).
    The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-a, a)` where

    .. math::
        a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}

    Also known as Glorot initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        gain: an optional scaling factor
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain("relu"))

    Note:
        Be aware that ``fan_in`` and ``fan_out`` are calculated assuming
        that the weight matrix is used in a transposed manner,
        (i.e., ``x @ w.T`` in ``Linear`` layers, where ``w.shape = [fan_out, fan_in]``).
        This is important for correct initialization.
        If you plan to use ``x @ w``, where ``w.shape = [fan_in, fan_out]``,
        pass in a transposed weight matrix, i.e. ``nn.init.xavier_uniform_(w.T, ...)``.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
    a = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation

    return _no_grad_uniform_(tensor, -a, a, generator)

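# Worked example (a sketch; the conv shape is an arbitrary assumption): for a
# Conv2d weight of shape (out=16, in=8, 3, 3), fan_in = 8 * 9 = 72 and
# fan_out = 16 * 9 = 144, so the Xavier-uniform bound with gain=1 is
# sqrt(6 / (72 + 144)) = 1/6.
#
#   >>> w = torch.empty(16, 8, 3, 3)
#   >>> xavier_uniform_(w)
#   >>> bound = math.sqrt(6.0 / (72 + 144))
#   >>> assert w.abs().max() <= bound
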
def xavier_normal_(
    tensor: Tensor,
    gain: float = 1.0,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input `Tensor` with values using a Xavier normal distribution.

    The method is described in `Understanding the difficulty of training deep feedforward
    neural networks` - Glorot, X. & Bengio, Y. (2010). The resulting tensor
    will have values sampled from :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::
        \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}}

    Also known as Glorot initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        gain: an optional scaling factor
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.xavier_normal_(w)

    Note:
        Be aware that ``fan_in`` and ``fan_out`` are calculated assuming
        that the weight matrix is used in a transposed manner,
        (i.e., ``x @ w.T`` in ``Linear`` layers, where ``w.shape = [fan_out, fan_in]``).
        This is important for correct initialization.
        If you plan to use ``x @ w``, where ``w.shape = [fan_in, fan_out]``,
        pass in a transposed weight matrix, i.e. ``nn.init.xavier_normal_(w.T, ...)``.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))

    return _no_grad_normal_(tensor, 0.0, std, generator)

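# Worked example (a sketch; the shape is an arbitrary assumption): for a
# (100, 300) weight, fan_in = 300 and fan_out = 100, so the Xavier-normal std
# with gain=1 is sqrt(2 / 400) ~= 0.0707, and the empirical std of a large
# sample should land close to it.
#
#   >>> w = torch.empty(100, 300)
#   >>> xavier_normal_(w)
#   >>> assert abs(w.std().item() - math.sqrt(2.0 / 400)) < 5e-3
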
def _calculate_correct_fan(tensor: Tensor, mode: _FanMode) -> int:
    mode = mode.lower()
    valid_modes = ["fan_in", "fan_out"]
    if mode not in valid_modes:
        raise ValueError(f"Mode {mode} not supported, please use one of {valid_modes}")

    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    return fan_in if mode == "fan_in" else fan_out


def kaiming_uniform_(
    tensor: Tensor,
    a: float = 0,
    mode: _FanMode = "fan_in",
    nonlinearity: _NonlinearityType = "leaky_relu",
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input `Tensor` with values using a Kaiming uniform distribution.

    The method is described in `Delving deep into rectifiers: Surpassing
    human-level performance on ImageNet classification` - He, K. et al. (2015).
    The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-\text{bound}, \text{bound})` where

    .. math::
        \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}

    Also known as He initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        a: the negative slope of the rectifier used after this layer (only
            used with ``'leaky_relu'``)
        mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
            preserves the magnitude of the variance of the weights in the
            forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
            backwards pass.
        nonlinearity: the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.kaiming_uniform_(w, mode="fan_in", nonlinearity="relu")

    Note:
        Be aware that ``fan_in`` and ``fan_out`` are calculated assuming
        that the weight matrix is used in a transposed manner,
        (i.e., ``x @ w.T`` in ``Linear`` layers, where ``w.shape = [fan_out, fan_in]``).
        This is important for correct initialization.
        If you plan to use ``x @ w``, where ``w.shape = [fan_in, fan_out]``,
        pass in a transposed weight matrix, i.e. ``nn.init.kaiming_uniform_(w.T, ...)``.
    """
    if torch.overrides.has_torch_function_variadic(tensor):
        return torch.overrides.handle_torch_function(
            kaiming_uniform_,
            (tensor,),
            tensor=tensor,
            a=a,
            mode=mode,
            nonlinearity=nonlinearity,
            generator=generator,
        )

    if 0 in tensor.shape:
        warnings.warn("Initializing zero-element tensors is a no-op")
        return tensor
    fan = _calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    bound = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
    with torch.no_grad():
        return tensor.uniform_(-bound, bound, generator=generator)

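# Worked example (a sketch; the shape is an arbitrary assumption): for a weight
# of shape (out=128, in=64) with mode="fan_in" and nonlinearity="relu",
# gain = sqrt(2) and bound = gain * sqrt(3 / 64) ~= 0.3062.
#
#   >>> w = torch.empty(128, 64)
#   >>> kaiming_uniform_(w, mode="fan_in", nonlinearity="relu")
#   >>> bound = math.sqrt(2.0) * math.sqrt(3.0 / 64)
#   >>> assert w.abs().max() <= bound
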
def kaiming_normal_(
    tensor: Tensor,
    a: float = 0,
    mode: _FanMode = "fan_in",
    nonlinearity: _NonlinearityType = "leaky_relu",
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input `Tensor` with values using a Kaiming normal distribution.

    The method is described in `Delving deep into rectifiers: Surpassing
    human-level performance on ImageNet classification` - He, K. et al. (2015).
    The resulting tensor will have values sampled from
    :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::
        \text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}}

    Also known as He initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        a: the negative slope of the rectifier used after this layer (only
            used with ``'leaky_relu'``)
        mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
            preserves the magnitude of the variance of the weights in the
            forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
            backwards pass.
        nonlinearity: the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.kaiming_normal_(w, mode="fan_out", nonlinearity="relu")

    Note:
        Be aware that ``fan_in`` and ``fan_out`` are calculated assuming
        that the weight matrix is used in a transposed manner,
        (i.e., ``x @ w.T`` in ``Linear`` layers, where ``w.shape = [fan_out, fan_in]``).
        This is important for correct initialization.
        If you plan to use ``x @ w``, where ``w.shape = [fan_in, fan_out]``,
        pass in a transposed weight matrix, i.e. ``nn.init.kaiming_normal_(w.T, ...)``.
    """
    if 0 in tensor.shape:
        warnings.warn("Initializing zero-element tensors is a no-op")
        return tensor
    fan = _calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    with torch.no_grad():
        return tensor.normal_(0, std, generator=generator)


def orthogonal_(
    tensor: Tensor,
    gain: float = 1,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the input `Tensor` with a (semi) orthogonal matrix.

    Described in `Exact solutions to the nonlinear dynamics of learning in deep
    linear neural networks` - Saxe, A. et al. (2013). The input tensor must have
    at least 2 dimensions, and for tensors with more than 2 dimensions the
    trailing dimensions are flattened.

    Args:
        tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2`
        gain: optional scaling factor
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
        >>> w = torch.empty(3, 5)
        >>> nn.init.orthogonal_(w)
    """
    if tensor.ndimension() < 2:
        raise ValueError("Only tensors with 2 or more dimensions are supported")

    if tensor.numel() == 0:
        # no-op
        return tensor
    rows = tensor.size(0)
    cols = tensor.numel() // rows
    flattened = tensor.new_empty((rows, cols)).normal_(0, 1, generator=generator)

    if rows < cols:
        flattened.t_()

    # Compute the qr factorization
    q, r = torch.linalg.qr(flattened)
    # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
    d = torch.diag(r, 0)
    ph = d.sign()
    q *= ph

    if rows < cols:
        q.t_()

    with torch.no_grad():
        tensor.view_as(q).copy_(q)
        tensor.mul_(gain)
    return tensor


def sparse_(
    tensor: Tensor,
    sparsity: float,
    std: float = 0.01,
    generator: _Optional[torch.Generator] = None,
) -> Tensor:
    r"""Fill the 2D input `Tensor` as a sparse matrix.

    The non-zero elements will be drawn from the normal distribution
    :math:`\mathcal{N}(0, 0.01)`, as described in `Deep learning via
    Hessian-free optimization` - Martens, J. (2010).

    Args:
        tensor: an n-dimensional `torch.Tensor`
        sparsity: The fraction of elements in each column to be set to zero
        std: the standard deviation of the normal distribution used to generate
            the non-zero values
        generator: the torch Generator to sample from (default: None)

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.sparse_(w, sparsity=0.1)
    """
    if tensor.ndimension() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")

    rows, cols = tensor.shape
    num_zeros = int(math.ceil(sparsity * rows))

    with torch.no_grad():
        tensor.normal_(0, std, generator=generator)
        for col_idx in range(cols):
            row_indices = torch.randperm(rows)
            zero_indices = row_indices[:num_zeros]
            tensor[zero_indices, col_idx] = 0
    return tensor


def _make_deprecate(meth: Callable[_P, _R]) -> Callable[_P, _R]:
    new_name = meth.__name__
    old_name = new_name[:-1]

    def deprecated_init(*args: _P.args, **kwargs: _P.kwargs) -> _R:
        warnings.warn(
            f"`nn.init.{old_name}` is now deprecated in favor of `nn.init.{new_name}`.",
            FutureWarning,
            stacklevel=2,
        )
        return meth(*args, **kwargs)

    deprecated_init.__doc__ = rf"""
    {old_name}(...)

    .. warning::
        This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`.

    See :func:`~torch.nn.init.{new_name}` for details."""
    deprecated_init.__name__ = old_name
    return deprecated_init


# for backward compatibility
uniform = _make_deprecate(uniform_)
normal = _make_deprecate(normal_)
constant = _make_deprecate(constant_)
eye = _make_deprecate(eye_)
dirac = _make_deprecate(dirac_)
xavier_uniform = _make_deprecate(xavier_uniform_)
xavier_normal = _make_deprecate(xavier_normal_)
kaiming_uniform = _make_deprecate(kaiming_uniform_)
kaiming_normal = _make_deprecate(kaiming_normal_)
orthogonal = _make_deprecate(orthogonal_)
sparse = _make_deprecate(sparse_)
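
# Usage sketches (illustrative, not from the original source):
#
# `orthogonal_` produces orthonormal rows for a wide matrix, so W @ W.T ~= I:
#
#   >>> w = torch.empty(3, 5)
#   >>> orthogonal_(w)
#   >>> assert torch.allclose(w @ w.T, torch.eye(3), atol=1e-5)
#
# The un-suffixed aliases built by `_make_deprecate` still work but emit a
# FutureWarning pointing at the trailing-underscore name:
#
#   >>> with warnings.catch_warnings(record=True) as caught:
#   ...     warnings.simplefilter("always")
#   ...     uniform(torch.empty(2, 2))
#   >>> assert any(issubclass(c.category, FutureWarning) for c in caught)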