docstrings for container and batchnorm
soumith committed Sep 16, 2016
1 parent a0a2d98 commit b5f7720
Showing 5 changed files with 216 additions and 59 deletions.
10 changes: 8 additions & 2 deletions README.md
@@ -1,4 +1,4 @@
# pytorch [alpha-2]
# pytorch [alpha-3]

| Python | **`Linux CPU`** | **`Linux GPU`** |
|--------|--------------------|------------------|
@@ -29,6 +29,12 @@ pip install -r requirements.txt
pip install .
```

## Getting Started
A more comprehensive Getting Started section will be filled in soon.
For now, there are two pointers:
- The MNIST example: [https://github.com/pytorch/examples](https://github.com/pytorch/examples)
- The API Reference: [http://pytorch.org/api/](http://pytorch.org/api/)

## Communication
* github issues: bug reports, feature requests, install issues, RFCs, thoughts, etc.
* slack: general chat, online discussions, collaboration etc. https://pytorch.slack.com/ . If you need a slack invite, ping me at soumith@pytorch.org
@@ -42,7 +48,7 @@ After that, we will reevaluate progress, and if we are ready, we will hit beta-0
* ~~alpha-0: Working versions of torch, cutorch, nn, cunn, optim fully unit tested with seamless numpy conversions~~
* ~~alpha-1: Serialization to/from disk with sharing intact. initial release of the new neuralnets package based on a Chainer-like design~~
* ~~alpha-2: sharing tensors across processes for hogwild training or data-loading processes. a rewritten optim package for this new nn.~~
* alpha-3: binary installs (prob will take @alexbw 's help here), contbuilds, etc.
* ~~alpha-3: binary installs, contbuilds, etc.~~
* alpha-4: a ton of examples across vision, nlp, speech, RL -- this phase might make us rethink parts of the APIs, and hence we want to do this in alpha rather than beta
* alpha-5: Putting a simple and efficient story around multi-machine training. Probably simplistic like torch-distlearn. Building the website, release scripts, more documentation, etc.
* alpha-6: [no plan yet]
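Expanding on the Getting Started pointers above, a minimal usage sketch in the spirit of the docstring examples this commit adds (assuming the alpha-3 `torch.nn` / `autograd.Variable` API shown in the activation docstrings below; the linked MNIST example remains the fuller reference):

```python
import torch
import torch.nn as nn
from torch.autograd import Variable

# Build a module and push a Variable through it, mirroring the
# ">>> m = nn.ReLU()" style examples in the updated docstrings.
m = nn.ReLU()
input = Variable(torch.randn(2))
print(input)
print(m(input))
```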
55 changes: 36 additions & 19 deletions docs/docutils/doc2md.py
@@ -190,17 +190,22 @@ def make_toc(sections):

def _doc2md(lines, shiftlevel=0):
_doc2md.md = []
md = _doc2md.md
_doc2md.is_code = False
_doc2md.is_code_block = False
_doc2md.is_args = False
_doc2md.is_returns = False
_doc2md.is_inputshape = False
_doc2md.is_outputshape = False
_doc2md.code = []
def reset():
if _doc2md.is_code:
_doc2md.is_code = False
_doc2md.md += ['']
_doc2md.md += doc_code_block(code, 'python')
_doc2md.code += doc_code_block(code, 'python')
_doc2md.code += ['']
if _doc2md.is_code_block:
_doc2md.is_code_block = False
_doc2md.code += doc_code_block(code_block, 'python')
_doc2md.code += ['']

if _doc2md.is_args:
_doc2md.is_args = False
@@ -218,43 +223,49 @@ def reset():
if is_args_check(line):
reset()
_doc2md.is_args = True
md += ['']
md += ['#' * (shiftlevel+2) + ' Constructor Arguments']
_doc2md.md += ['']
_doc2md.md += ['#' * (shiftlevel+2) + ' Constructor Arguments']
args = []
elif is_returns_check(line):
reset()
_doc2md.is_returns = True
md += ['']
md += ['#' * (shiftlevel+2) + ' Returns']
_doc2md.md += ['']
_doc2md.md += ['#' * (shiftlevel+2) + ' Returns']
returns = []
elif is_example_check(line):
reset()
#md += [line]
elif is_inputshape_check(line):
reset()
inputshape = re.findall(r'\s*Input\sShape:\s*(.*)\s*:\s*(.*)\s*$', line)[0]
elif is_outputshape_check(line):
reset()
outputshape = re.findall(r'\s*Output\sShape:\s*(.*)\s*:\s*(.*)\s*$', line)[0]
md += ['']
md += ['#' * (shiftlevel+2) + ' Expected Shape']
md += [' | Shape | Description ']
md += ['------ | ----- | ------------']
md += [' input | ' + inputshape[0] + ' | ' + inputshape[1]]
md += ['output | ' + outputshape[0] + ' | ' + outputshape[1]]
_doc2md.md += ['']
_doc2md.md += ['#' * (shiftlevel+2) + ' Expected Shape']
_doc2md.md += [' | Shape | Description ']
_doc2md.md += ['------ | ----- | ------------']
_doc2md.md += [' input | ' + inputshape[0] + ' | ' + inputshape[1]]
_doc2md.md += ['output | ' + outputshape[0] + ' | ' + outputshape[1]]
elif is_image_check(line):
reset()
md += ['']
_doc2md.md += ['']
filename = re.findall(r'\s*Image:\s*(.*?)\s*$', line)
md += ['<img src="image/' + filename[0] + '" >']
_doc2md.md += ['<img src="image/' + filename[0] + '" >']
elif _doc2md.is_code == False and trimmed.startswith('>>> '):
reset()
_doc2md.is_code = True
code = [line]
elif _doc2md.is_code_block == False and trimmed.startswith('```'):
reset()
_doc2md.is_code_block = True
code_block = []
elif _doc2md.is_code_block == True and trimmed.startswith('```'):
# end of code block
reset()
elif shiftlevel != 0 and is_heading(line):
reset()
level, title = get_heading(line)
md += [make_heading(level + shiftlevel, title)]
_doc2md.md += [make_heading(level + shiftlevel, title)]
elif _doc2md.is_args:
if line:
args.append(line)
@@ -270,11 +281,17 @@ def reset():
code.append(line)
else:
reset()
elif _doc2md.is_code_block:
if line:
code_block.append(line)
else:
reset()
else:
reset()
md += [line]
_doc2md.md += [line]
reset()
return md
_doc2md.code += _doc2md.md
return _doc2md.code

def doc2md(docstr, title, min_level=1, more_info=False, toc=True):
"""
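For illustration, a standalone sketch of the fenced-code-block handling this hunk introduces via `_doc2md.is_code_block` (simplified: state lives in local variables rather than function attributes, and blank lines inside a fence are preserved instead of ending the block):

```python
FENCE = "`" * 3  # the triple-backtick marker the parser looks for

def collect_fenced_blocks(lines):
    # Collect the contents of fenced code blocks from docstring lines,
    # roughly mirroring the is_code_block state machine above.
    blocks, current, in_block = [], [], False
    for line in lines:
        stripped = line.strip()
        if not in_block and stripped.startswith(FENCE):
            in_block, current = True, []   # opening fence starts a new block
        elif in_block and stripped.startswith(FENCE):
            in_block = False               # closing fence ends the block
            blocks.append(current)
        elif in_block:
            current.append(line)
    return blocks

doc = ["Examples:", FENCE, "m = nn.ReLU()", "print(m(input))", FENCE]
print(collect_fenced_blocks(doc))  # [['m = nn.ReLU()', 'print(m(input))']]
```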
80 changes: 42 additions & 38 deletions torch/nn/modules/activation.py
@@ -13,10 +13,10 @@ class Threshold(Module):
threshold: The value to threshold at
value: The value to replace with
inplace: can optionally do the operation in-place
Returns:
Tensor of same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
Tensor of same dimension and shape as the input
Examples:
>>> m = nn.Threshold(0.1, 20)
>>> input = Variable(torch.randn(2))
@@ -38,10 +38,10 @@ class ReLU(Threshold):
"""Applies the rectified linear unit function element-wise ReLU(x)= max(0,x)
Args:
inplace: can optionally do the operation in-place
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Image: relu.png
Examples:
>>> m = nn.ReLU()
@@ -64,10 +64,10 @@ class Hardtanh(Module):
min_value: minimum value of the linear region range
max_value: maximum value of the linear region range
inplace: can optionally do the operation in-place
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Image: htanh.png
Examples:
>>> m = nn.Hardtanh(-2, 2)
@@ -90,10 +90,10 @@ class ReLU6(Hardtanh):
"""Applies the element-wise function ReLU6(x) = min( max(0,x), 6)
Args:
inplace: can optionally do the operation in-place
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Image: relu6.png
Examples:
>>> m = nn.ReLU6()
@@ -107,10 +107,10 @@ def __init__(self, inplace=False):

class Sigmoid(Module):
"""Applies the element-wise function sigmoid(x) = 1 / ( 1 + exp(-x))
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Image: sigmoid.png
Examples:
>>> m = nn.Sigmoid()
@@ -124,10 +124,10 @@ def forward(self, input):

class Tanh(Module):
"""Applies element-wise, Tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Image: tanh.png
Examples:
>>> m = nn.Tanh()
@@ -144,10 +144,10 @@ class ELU(Module):
Args:
alpha: the alpha value for the ELU formulation. Default: 1.0
inplace: can optionally do the operation in-place
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Image: elu.png
Examples:
>>> m = nn.ELU()
@@ -171,10 +171,10 @@ class Hardshrink(Module):
f(x) = 0, otherwise
Args:
lambd: the lambda value for the Hardshrink formulation. Default: 0.5
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Image: hshrink.png
Examples:
>>> m = nn.Hardshrink()
@@ -195,10 +195,10 @@ class LeakyReLU(Module):
Args:
negative_slope: Controls the angle of the negative slope. Default: 1e-2
inplace: can optionally do the operation in-place
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Examples:
>>> m = nn.LeakyReLU(0.1)
>>> input = autograd.Variable(torch.randn(2))
@@ -216,10 +216,10 @@ def forward(self, input):

class LogSigmoid(Module):
"""Applies element-wise LogSigmoid(x) = log( 1 / (1 + exp(-x_i)))
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Image: logsigmoid.png
Examples:
>>> m = nn.LogSigmoid()
@@ -240,10 +240,10 @@ class Softplus(Module):
Args:
beta: the beta value for the Softplus formulation. Default: 1
threshold: values above this revert to a linear function. Default: 20
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Image: softplus.png
Examples:
>>> m = nn.Softplus()
@@ -267,10 +267,10 @@ class Softshrink(Module):
f(x) = 0, otherwise
Args:
lambd: the lambda value for the Softshrink formulation. Default: 0.5
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Image: sshrink.png
Examples:
>>> m = nn.Softshrink()
@@ -297,10 +297,10 @@ class PReLU(Module):
Args:
num_parameters: number of "a" to learn. Default: 1
init: the initial value of "a". Default: 0.25
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Image: prelu.png
Examples:
>>> m = nn.PReLU()
@@ -319,10 +319,10 @@ def forward(self, input):

class Softsign(Module):
"""Applies element-wise, the function Softsign(x) = x / (1 + |x|)
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Image: softsign.png
Examples:
>>> m = nn.Softsign()
@@ -336,10 +336,10 @@ def forward(self, input):

class Tanhshrink(Module):
"""Applies element-wise, Tanhshrink(x) = x - Tanh(x)
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: Any : Tensor of any size and dimension
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input
Examples:
>>> m = nn.Tanhshrink()
>>> input = autograd.Variable(torch.randn(2))
@@ -357,10 +357,11 @@ class Softmin(Module):
lie in the range (0,1) and sum to 1
Softmin(x) = exp(-x_i - shift) / sum_j exp(-x_j - shift)
where shift = max_i (-x_i)
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: [ * , * ] : 2D Tensor of any size
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input, with
values in the range [0, 1]
Image: softmin.png
Examples:
>>> m = nn.Softmin()
@@ -380,10 +381,11 @@ class Softmax(Module):
Softmax is defined as f_i(x) = exp(x_i - shift) / sum_j exp(x_j - shift)
where shift = max_i x_i
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: [ * , * ] : 2D Tensor of any size
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Image: softmax.png
Notes:
Note that this module doesn't work directly with NLLLoss,
@@ -405,10 +407,11 @@ class Softmax2d(Module):
When given an image of Channels x Height x Width, it will
apply Softmax to each location [Channels, h_i, w_j]
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: [ * , * , * , * ] : 4D Tensor of any size
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Examples:
>>> m = nn.Softmax2d()
>>> # you softmax over the 2nd dimension
@@ -424,10 +427,11 @@ class LogSoftmax(Module):
"""Applies the Log(Softmax(x)) function to an n-dimensional input Tensor.
The LogSoftmax formulation can be simplified as
f_i(x) = log(1 / a * exp(x_i)) where a = sum_j exp(x_j) .
Returns:
a Tensor of the same dimension and shape as the input
Input Shape: [ * , * ] : 2D Tensor of any size
Output Shape: Same : Output has the same shape as input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [-inf, 0)
Image: logsoftmax.png
Examples:
>>> m = nn.LogSoftmax()
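To make the shift-based formulations in the Softmax and LogSoftmax docstrings concrete, a small plain-Python check (illustration only, independent of the torch API):

```python
import math

def softmax(xs):
    # f_i(x) = exp(x_i - shift) / sum_j exp(x_j - shift) with shift = max_i x_i,
    # as in the Softmax docstring; the shift only adds numerical stability.
    shift = max(xs)
    exps = [math.exp(x - shift) for x in xs]
    total = sum(exps)
    return [e / total for e in exps]

xs = [1.0, 2.0, 3.0]
probs = softmax(xs)
print(probs, sum(probs))             # each value lies in (0, 1); the values sum to 1
print([math.log(p) for p in probs])  # LogSoftmax values: all strictly below 0
```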

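Finally, a worked sketch of how the `Input Shape:` / `Output Shape:` docstring lines above become the Expected Shape table, reusing the regular expressions from doc2md.py (`shape_table` is a hypothetical standalone helper, not part of the commit):

```python
import re

def shape_table(lines):
    # Pull "Input Shape: X : desc" / "Output Shape: Y : desc" out of a
    # docstring and emit the markdown table that doc2md.py generates.
    inshape = outshape = None
    for line in lines:
        found = re.findall(r'\s*Input\sShape:\s*(.*)\s*:\s*(.*)\s*$', line)
        if found:
            inshape = found[0]
        found = re.findall(r'\s*Output\sShape:\s*(.*)\s*:\s*(.*)\s*$', line)
        if found:
            outshape = found[0]
    return [' | Shape | Description ',
            '------ | ----- | ------------',
            ' input | ' + inshape[0] + ' | ' + inshape[1],
            'output | ' + outshape[0] + ' | ' + outshape[1]]

doc = ['Input Shape: Any : Tensor of any size and dimension',
       'Output Shape: Same : Output has the same shape as input']
print('\n'.join(shape_table(doc)))
```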