
import logging
import pdb
import sys
import traceback
import typing
from datetime import timedelta

import torch

log = logging.getLogger(__name__)


def is_available() -> bool:
    """
    Return ``True`` if the distributed package is available.

    Otherwise,
    ``torch.distributed`` does not expose any other APIs. Currently,
    ``torch.distributed`` is available on Linux, MacOS and Windows. Set
    ``USE_DISTRIBUTED=1`` to enable it when building PyTorch from source.
    Currently, the default value is ``USE_DISTRIBUTED=1`` for Linux and Windows,
    ``USE_DISTRIBUTED=0`` for MacOS.
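
    Example::

        >>> # A minimal guard; a sketch assuming only that PyTorch is installed.
        >>> import torch.distributed as dist
        >>> if dist.is_available():
        ...     print("the distributed package can be used")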
    
    """
    return hasattr(torch._C, "_c10d_init")


if is_available() and not torch._C._c10d_init():
    raise RuntimeError("Failed to initialize torch.distributed")

# Custom runtime errors thrown from the distributed package.
DistError = torch._C._DistError
DistBackendError = torch._C._DistBackendError
DistNetworkError = torch._C._DistNetworkError
DistStoreError = torch._C._DistStoreError
QueueEmptyError = torch._C._DistQueueEmptyError

if is_available():
    from torch._C._distributed_c10d import (
        _broadcast_coalesced,
        _compute_bucket_assignment_by_size,
        _ControlCollectives,
        _DEFAULT_FIRST_BUCKET_BYTES,
        _make_nccl_premul_sum,
        _register_builtin_comm_hook,
        _register_comm_hook,
        _StoreCollectives,
        _test_python_store,
        _verify_params_across_processes,
        Backend as _Backend,
        BuiltinCommHookType,
        DebugLevel,
        FileStore,
        get_debug_level,
        GradBucket,
        Logger,
        PrefixStore,
        ProcessGroup,
        Reducer,
        set_debug_level,
        set_debug_level_from_env,
        Store,
        TCPStore,
        Work as _Work,
    )

    class _DistributedPdb(pdb.Pdb):
        """
        Supports using PDB from inside a multiprocessing child process.

        Usage:
        _DistributedPdb().set_trace()
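
        The ``interaction`` override below reopens ``/dev/stdin`` before
        delegating to ``pdb.Pdb.interaction`` so the debugger can read
        keyboard input even when the child process inherited a closed or
        redirected stdin; the original ``sys.stdin`` is restored afterwards.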
        """

        def interaction(self, *args, **kwargs):
            # Keep a handle on the inherited stdin so it can be restored.
            _stdin = sys.stdin
            try:
                sys.stdin = open("/dev/stdin")
                pdb.Pdb.interaction(self, *args, **kwargs)
            finally:
                sys.stdin = _stdin

    _breakpoint_cache: dict[int, typing.Any] = {}

    def breakpoint(rank: int = 0, skip: int = 0, timeout_s=3600):
        """
        Set a breakpoint, but only on a single rank.  All other ranks will wait for you to be
        done with the breakpoint before continuing.

        Args:
            rank (int): Which rank to break on.  Default: ``0``
            skip (int): Skip the first ``skip`` calls to this breakpoint. Default: ``0``.
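            timeout_s (int, optional): Raise each existing process group's timeout
                to this many seconds before pausing, so the paused ranks are not
                killed by the collective-timeout watchdog; pass ``None`` to leave
                timeouts unchanged. Default: ``3600``.

        Example::

            >>> # Sketch: drop rank 0 into pdb while every other rank blocks
            >>> # in barrier(); assumes init_process_group() already ran.
            >>> torch.distributed.breakpoint(rank=0)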
        """
        if skip > 0:
            key = hash(str(traceback.format_exc()))
            counter = _breakpoint_cache.get(key, 0) + 1
            _breakpoint_cache[key] = counter
            if counter <= skip:
                log.warning("Skip the breakpoint, counter=%d", counter)
                return

        if timeout_s is not None:
            # Raise every process group's timeout so a long debug session is
            # not interrupted by the collective-timeout watchdog.
            for group in torch.distributed.distributed_c10d._pg_map:
                torch.distributed.distributed_c10d._set_pg_timeout(
                    timedelta(seconds=timeout_s), group
                )

        if get_rank() == rank:
            pdb = _DistributedPdb()
            pdb.message(
                "\n!!! ATTENTION !!!\n\n"
                f"Type 'up' to get to the frame that called dist.breakpoint(rank={rank})\n"
            )
            pdb.set_trace()
        # If Meta/Python keys are in the TLS, make sure we ignore them and hit
        # the (default) CPU/CUDA implementation of barrier.
        meta_in_tls = torch._C._meta_in_tls_dispatch_include()
        guard = torch._C._DisableTorchDispatch()
        torch._C._set_meta_in_tls_dispatch_include(False)
        try:
            barrier()
        finally:
            torch._C._set_meta_in_tls_dispatch_include(meta_in_tls)
            del guard

    if sys.platform != "win32":
        from torch._C._distributed_c10d import HashStore

    from .device_mesh import DeviceMesh, init_device_mesh

    # Underscore-prefixed helpers are not pulled in by the star-import below,
    # so re-export the ones that are needed explicitly.
    from .distributed_c10d import *  # noqa: F403
    from .distributed_c10d import (
        _all_gather_base,
        _coalescing_manager,
        _CoalescingManager,
        _create_process_group_wrapper,
        _get_process_group_name,
        _rank_not_in_group,
        _reduce_scatter_base,
        _time_estimator,
        get_node_local_rank,
    )
    from .remote_device import _remote_device
    from .rendezvous import (
        _create_store_from_options,
        register_rendezvous_handler,
        rendezvous,
    )

    set_debug_level_from_env()

else:
    # Stub so that public-binding checks still see a ProcessGroup symbol
    # when PyTorch is built with USE_DISTRIBUTED=0.
    class _ProcessGroupStub:
        pass

    sys.modules["torch.distributed"].ProcessGroup = _ProcessGroupStub