
import logging

import torch
from torch._dynamo.utils import counters
from torch._inductor.codegen.rocm.ck_universal_gemm_template import CKGemmTemplate

from .. import ir, lowering as L
from ..select_algorithm import (
    autotune_select_algorithm,
    ExternKernelChoice,
    SymbolicGridFn,
    TritonTemplate,
)
from ..utils import (
    _use_cutlass_for_op,
    use_aten_gemm_kernels,
    use_ck_gemm_template,
    use_cpp_bmm_template,
    use_cutlass_template,
    use_triton_template,
)
from ..virtualized import V
from .mm_common import (
    _is_static_problem,
    addmm_epilogue,
    is_batch_stride_largest,
    mm_args,
    mm_config_kwargs,
    mm_options,
)

log = logging.getLogger(__name__)
aten = torch.ops.aten


@SymbolicGridFn
def bmm_grid(b, m, n, meta, *, cdiv):
    # One program per (BLOCK_M, BLOCK_N) output tile; the batch index is grid axis 1.
    return cdiv(m, meta["BLOCK_M"]) * cdiv(n, meta["BLOCK_N"]), b, 1


def _is_large_block_for_cpu(m, n, k):
    # Thresholds chosen to keep Triton CPU compile times reasonable.
    if m > 128 or n > 128 or k > 128:
        return True
    return m * n > 2**12


bmm_template = TritonTemplate(
    name="bmm",
    grid=bmm_grid,
    source=r"""
{{def_kernel("A", "B")}}
    M = {{size("A", -2)}}
    N = {{size("B", -1)}}
    K = {{size("A", -1)}}

    stride_aq = {{stride("A", 0)}}
    stride_am = {{stride("A", 1)}}
    stride_ak = {{stride("A", 2)}}

    stride_bq = {{stride("B", 0)}}
    stride_bk = {{stride("B", 1)}}
    stride_bn = {{stride("B", 2)}}

    # based on triton.ops.matmul
    pid = tl.program_id(0)
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N

    # re-order program ID for better L2 performance
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)
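    # let the compiler assume the tile ids are non-negative, which helps it
    # simplify the index arithmetic below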
    tl.assume(pid_m >= 0)
    tl.assume(pid_n >= 0)

    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
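    # if A (and likewise B below) is laid out contiguously in its last two dims,
    # tl.multiple_of / tl.max_contiguous promise Triton that the wrapped row
    # indices form aligned contiguous blocks, enabling coalesced loads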
    if (stride_am == 1 and stride_ak == M) or (stride_am == K and stride_ak == 1):
        ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    else:
        ram = rm % M
    if (stride_bk == 1 and stride_bn == K) or (stride_bk == N and stride_bn == 1):
        rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    else:
        rbn = rn % N

    rk = tl.arange(0, BLOCK_K)

    idx_q = tl.program_id(1)  # batch dimension for BMM
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak + idx_q*stride_aq)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn + idx_q*stride_bq)

    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
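    # accumulate partial products in ACC_TYPE (typically tl.float32), stepping
    # the K dimension down in BLOCK_K-sized chunks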
    for k in range(K, 0, -BLOCK_K):
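        # when K is not a multiple of BLOCK_K (EVEN_K is False), the ragged tail
        # is masked off; masked elements load as 0.0 and do not affect tl.dot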
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            a = tl.load(A, mask=rk[None, :] < k, other=0.)
            b = tl.load(B, mask=rk[:, None] < k, other=0.)
        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
        A += BLOCK_K * stride_ak
        B += BLOCK_K * stride_bk

    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    idx_q = tl.program_id(1)  # batch dimension for BMM
    idx_m = rm[:, None]
    idx_n = rn[None, :]
    mask = (idx_m < M) & (idx_n < N)

    # inductor generates a suffix
    {{store_output(("idx_q", "idx_m", "idx_n"), "acc", "mask")}}
""",
    cache_codegen_enabled_for_template=True,
)

aten_bmm = ExternKernelChoice(torch.bmm, "at::bmm_out")
aten_bmm_dtype = ExternKernelChoice(
    torch.bmm,
    "at::_bmm_out_dtype_cuda",
    name="bmm_dtype",
    op_overload=aten.bmm.dtype_out,
)
aten_baddbmm = ExternKernelChoice(
    torch.baddbmm, "at::baddbmm_out", op_overload=aten.baddbmm.out
)


@L.register_lowering(aten.bmm)
def tuned_bmm(mat1, mat2, out_dtype=None, *, layout=None):
    """
    Lowering for autotuning aten.bmm with different backends (Aten, Triton, CUTLASS, etc.)
    """
    if all(x.get_device().type == "cpu" for x in [mat1, mat2]):
        # decompose to small ops when memory bound
        if mat1.get_size()[1] == 1 or mat2.get_size()[2] == 1:
            mat1 = L.unsqueeze(mat1, -1)
            mat2 = L.unsqueeze(mat2, 1)
            return L.sum_(L.mul(mat1, mat2), axis=2)

        def is_valid_to_require_contiguous(t):
            if not ir.is_storage_and_layout(t):
                return True
            _, layout = ir.as_storage_and_layout(t, freeze=False)
            return isinstance(layout, ir.FlexibleLayout)

        def is_preferred_layout_as_bmm_input(sizes, strides):
            # contiguous on one of the last two dims
            return (
                strides[-1] == 1 and (sizes[-2] == 1 or strides[-2] >= sizes[-1])
            ) or (strides[-2] == 1 and (sizes[-1] == 1 or strides[-1] >= sizes[-2]))

        # Make a bmm input contiguous if it is not contiguous on either of the
        # last two dims; the aten CPU bmm kernel would otherwise copy it anyway.
        def may_require_contiguous(t, meta_t):
            sizes = meta_t.meta["val"].size()
            strides = meta_t.meta["val"].stride()
            if not is_preferred_layout_as_bmm_input(sizes, strides):
                t = ir.ExternKernel.require_contiguous(t)
            return t

        if is_valid_to_require_contiguous(mat1):
            meta_mat1 = V.graph.current_node.args[0]
            mat1 = may_require_contiguous(mat1, meta_mat1)
        if is_valid_to_require_contiguous(mat2):
            meta_mat2 = V.graph.current_node.args[1]
            mat2 = may_require_contiguous(mat2, meta_mat2)

    m, n, k, layout, mat1, mat2 = mm_args(
        mat1, mat2, layout=layout, out_dtype=out_dtype
    )

    # overview logging info for inductor mms
    batch_size = mat1.get_size()[0]
    counters["aten_mm_info"][f"aten.bmm_{batch_size}_{m}_{n}_{k}"] += 1
    log.info(
        "Tuned aten.bmm: batch=%s, m=%s, n=%s, k=%s, mat1_dtype=%s, mat2_dtype=%s, output_layout=%s",
        batch_size, m, n, k, mat1.get_dtype(), mat2.get_dtype(), layout,
    )

    if out_dtype:
        assert mat1.get_device().type == "cuda", "out_dtype is only supported for CUDA"
        aten_func = aten_bmm_dtype.bind((mat1, mat2), layout, out_dtype=out_dtype)
    else:
        aten_func = aten_bmm.bind((mat1, mat2), layout)

    # options to tune from
    choices = [aten_func] if use_aten_gemm_kernels() else []

    device_type = ir.get_device_type(mat1)
    bmm_configs = V.choices.get_base_mm_configs(device_type)

    dtype = mat1.get_dtype()
    if use_triton_template(layout):
        assert out_dtype is None, "out_dtype is not supported for Triton"
        for config in bmm_configs(
            m, n, k, **mm_config_kwargs(device_type, _is_large_block_for_cpu, dtype.itemsize)
        ):
            bmm_template.maybe_append_choice(
                choices,
                input_nodes=(mat1, mat2),
                layout=layout,
                **mm_options(config, m, n, k, layout),
            )
    _, is_nonzero = _is_static_problem(layout)
    batch_stride_largest = is_batch_stride_largest(mat1, mat2, layout)
    if (
        batch_stride_largest
        and is_nonzero
        and use_cutlass_template(layout, m, n, k)
        and _use_cutlass_for_op("bmm")
    ):
        from ..codegen.cuda.gemm_template import CUTLASS3xGemmTemplate

        CUTLASS3xGemmTemplate.add_cutlass_gemm_choices(choices, layout, [mat1, mat2])

    if use_cpp_bmm_template(layout, mat1, mat2):
        from ..codegen.cpp_bmm_template import CppBmmTemplate

        CppBmmTemplate.add_choices(choices, layout, [mat1, mat2])

    if use_ck_gemm_template(layout, m, n, k):
        CKGemmTemplate.add_ck_gemm_choices(choices, layout, [mat1, mat2])

    return autotune_select_algorithm("bmm", choices, [mat1, mat2], layout)


@L.register_lowering(aten.baddbmm)
def tuned_baddbmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):
    m, n, k, layout, mat1, mat2, inp = mm_args(mat1, mat2, inp, layout=layout)

    # overview logging info for inductor mms
    batch_size = mat1.get_size()[0]
    counters["aten_mm_info"][f"aten.baddbmm_{batch_size}_{m}_{n}_{k}"] += 1
    log.info(
        "Tuned aten.baddbmm: batch_size=%s, m=%s, n=%s, k=%s, mat1_dtype=%s, mat2_dtype=%s, inp=%s, output_layout=%s",
        batch_size, m, n, k, mat1.get_dtype(), mat2.get_dtype(), inp.get_dtype(), layout,
    )

    # options to tune from
    choices = (
        [aten_baddbmm.bind((inp, mat1, mat2), layout, alpha=alpha, beta=beta)]
        if use_aten_gemm_kernels()
        else []
    )

    device_type = ir.get_device_type(mat1)
    bmm_configs = V.choices.get_base_mm_configs(device_type)

    if use_triton_template(layout):
        for config in bmm_configs(
            m, n, k, **mm_config_kwargs(device_type, _is_large_block_for_cpu)
        ):
            # inp is passed as a prefix arg: it is consumed by addmm_epilogue
            # (alpha * acc + beta * inp) rather than by the matmul itself
            bmm_template.maybe_append_choice(
                choices,
                input_nodes=(inp, mat1, mat2),
                layout=layout,
                **mm_options(config, m, n, k, layout),
                prefix_args=1,
                epilogue_fn=addmm_epilogue(layout.dtype, alpha, beta),
                epilogue_fn_hash=str(["addmm_epilogue", layout.dtype, alpha, beta]),
            )

    return autotune_select_algorithm("baddbmm", choices, [inp, mat1, mat2], layout)