Generate Python docs from pytorch/pytorch@d06624d
pytorchbot committed Apr 20, 2023
1 parent 273cdd3 commit 7ed53a6
Showing 13 changed files with 297 additions and 215 deletions.
Binary file modified docs/main/_images/RReLU.png
2 changes: 1 addition & 1 deletion docs/main/_modules/torch/_dynamo/backends/registry.html
@@ -545,7 +545,7 @@ Source code for torch._dynamo.backends.registry
 
     import_submodule(backends)
 
-    from ..debug_utils import dynamo_minifier_backend
+    from ..repro.after_dynamo import dynamo_minifier_backend
 
     assert dynamo_minifier_backend is not None
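
For context: a TorchDynamo backend such as the minifier backend imported above is just a callable that receives the captured FX graph and its example inputs and returns a callable. A minimal sketch, assuming the PyTorch 2.0 torch.compile API (the backend name below is hypothetical):

    import torch

    def my_eager_backend(gm: torch.fx.GraphModule, example_inputs):
        # A backend gets the captured graph plus example inputs and must
        # return a callable; returning gm.forward runs the graph unoptimized.
        return gm.forward

    @torch.compile(backend=my_eager_backend)
    def f(x):
        return torch.sin(x) + 1

    f(torch.randn(4))  # first call triggers capture and hands gm to the backend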
10 changes: 7 additions & 3 deletions docs/main/_modules/torch/_dynamo/eval_frame.html
@@ -805,7 +805,11 @@ Source code for torch._dynamo.eval_frame
 
 class RunOnlyContext(_TorchDynamoContext):
     def __init__(self):
-        super().__init__(callback=False)
+        # cudagraph trees relies on generation increment
+        def on_enter():
+            torch._dynamo.mutation_guard.GenerationTracker.generation += 1
+
+        super().__init__(callback=False, on_enter=on_enter)
 
 
 class DisableContext(_TorchDynamoContext):
@@ -874,7 +878,7 @@ Source code for torch._dynamo.eval_frame
 
 
 def get_compiler_fn(compiler_fn):
-    from .debug_utils import wrap_backend_debug
+    from .repro.after_dynamo import wrap_backend_debug
 
     if hasattr(compiler_fn, "compiler_name"):
        compiler_str = compiler_fn.compiler_name

@@ -1220,7 +1224,7 @@ Source code for torch._dynamo.eval_frame
 
     remove_from_cache(f)
     with patch(f"{__name__}.most_recent_backend", None), config.patch(
-        specialize_int=True
+        summarize_dim_constraints=True, specialize_int=True
     ):
         opt_f = optimize_assert(
             dynamo_normalization_capturing_compiler,
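
The last hunk adds summarize_dim_constraints to the patched config flags. torch._dynamo.config.patch, as used there, temporarily overrides config values and restores them on exit; a small sketch using only a flag taken from the hunk above:

    from torch import _dynamo

    # The override is active only inside the with-block and is
    # automatically restored afterwards.
    with _dynamo.config.patch(specialize_int=True):
        pass  # code compiled here sees the patched config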
4 changes: 2 additions & 2 deletions docs/main/_modules/torch/amp/autocast_mode.html
@@ -775,8 +775,8 @@ Source code for torch.amp.autocast_mode
         elif self.device == self.custom_backend_name:
             if torch.autocast_decrement_nesting() == 0:
                 torch.clear_autocast_cache()
-            self.custom_device_mod.set_autocast_enabled(self._enabled)
-            self.custom_device_mod.set_autocast_dtype(self.fast_dtype)
+            self.custom_device_mod.set_autocast_enabled(self.prev)
+            self.custom_device_mod.set_autocast_dtype(self.prev_fastdtype)
         else:
             if torch.autocast_decrement_nesting() == 0:
                 torch.clear_autocast_cache()
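
The two-line fix above makes __exit__ restore the autocast state saved on entry (self.prev, self.prev_fastdtype) rather than re-applying the values this context itself set (self._enabled, self.fast_dtype), which would leak the inner settings to the enclosing scope. A self-contained sketch of the save/restore pattern, with a hypothetical backend object standing in for custom_device_mod:

    class AutocastLike:
        """Save/restore sketch; `backend` is a hypothetical stand-in module."""

        def __init__(self, backend, enabled=True, fast_dtype="float16"):
            self.backend = backend
            self._enabled = enabled
            self.fast_dtype = fast_dtype

        def __enter__(self):
            # Save the state that was active *before* this context.
            self.prev = self.backend.is_autocast_enabled()
            self.prev_fastdtype = self.backend.get_autocast_dtype()
            self.backend.set_autocast_enabled(self._enabled)
            self.backend.set_autocast_dtype(self.fast_dtype)

        def __exit__(self, *exc):
            # Restore the saved state; restoring self._enabled/self.fast_dtype
            # instead would re-apply this context's own settings (the bug
            # fixed in the hunk above).
            self.backend.set_autocast_enabled(self.prev)
            self.backend.set_autocast_dtype(self.prev_fastdtype)
            return False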
58 changes: 0 additions & 58 deletions docs/main/_modules/torch/cuda.html
@@ -1352,64 +1352,6 @@ Source code for torch.cuda
 
 
 
-def _get_device(device: Union[int, str, torch.device]) -> torch.device:
-    r"""Return the torch.device type object from the passed in device.
-
-    Args:
-        device (torch.device or int): selected device.
-    """
-    if isinstance(device, str):
-        device = torch.device(device)
-    elif isinstance(device, int):
-        device = torch.device('cuda', device)
-    return device
-
-
-def _get_generator(device: torch.device) -> torch._C.Generator:
-    r"""Return the CUDA Generator object for the given device.
-
-    Args:
-        device (torch.device): selected device.
-    """
-
-    idx = device.index
-    if idx is None:
-        idx = current_device()
-    return torch.cuda.default_generators[idx]
-
-
-def _set_rng_state_offset(offset: int, device: Union[int, str, torch.device] = 'cuda') -> None:
-    r"""Sets the random number generator state offset of the specified GPU.
-
-    Args:
-        offset (int): The desired offset
-        device (torch.device or int, optional): The device to set the RNG state.
-            Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
-    """
-    final_device = _get_device(device)
-
-    def cb():
-        default_generator = _get_generator(final_device)
-        default_generator.set_offset(offset)
-
-    _lazy_call(cb)
-
-def _get_rng_state_offset(device: Union[int, str, torch.device] = 'cuda') -> int:
-    r"""Returns the random number generator state offset of the specified GPU.
-
-    Args:
-        device (torch.device or int, optional): The device to return the RNG state offset of.
-            Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
-
-    .. warning::
-        This function eagerly initializes CUDA.
-    """
-    _lazy_init()
-    final_device = _get_device(device)
-    default_generator = _get_generator(final_device)
-    return default_generator.get_offset()
-
-
 from .memory import *  # noqa: F403
 
 
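
All four RNG-offset helpers are deleted from this module in one block. A hedged usage sketch derived from the deleted signatures above (underscore-prefixed private API; the commit removes them from torch/cuda, so the calls assume a build where they are still accessible, e.g. re-exported from elsewhere):

    import torch

    if torch.cuda.is_available():
        # Queued via _lazy_call, so it runs once CUDA is initialized.
        torch.cuda._set_rng_state_offset(8, device="cuda:0")
        # Per the removed docstring, this call eagerly initializes CUDA.
        offset = torch.cuda._get_rng_state_offset("cuda:0")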
2 changes: 2 additions & 0 deletions docs/main/_modules/torch/distributed.html
@@ -523,6 +523,8 @@ Source code for torch.distributed
         _create_process_group_wrapper,
         _rank_not_in_group,
         _c10d_error_logger,
+        _coalescing_manager,
+        _CoalescingManager,
     )
 
     from .rendezvous import (
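
The two new imports re-export the coalescing API at the torch.distributed top level. A hedged usage sketch, assuming the context-manager form of this private API (its exact signature has changed across releases, and a process group must already be initialized):

    import torch.distributed as dist

    def batched_allreduce(tensors):
        # Assumed shape of the private API: collectives issued inside the
        # manager are deferred and flushed as one batched operation on exit;
        # with async_ops=True the returned _CoalescingManager is waitable.
        with dist._coalescing_manager(async_ops=True) as cm:
            for t in tensors:
                dist.all_reduce(t)
        cm.wait()  # block until the coalesced collective completes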