[release] 1.2.0 for TL 1.7.5
zsdonghao committed Feb 22, 2018
1 parent 085d477 commit a1d2c2b
Showing 42 changed files with 10,435 additions and 3,314 deletions.
2 changes: 2 additions & 0 deletions README.md
@@ -2,7 +2,9 @@

We run this script under [TensorFlow](https://www.tensorflow.org) 1.2 and the self-contained [TensorLayer](http://tensorlayer.readthedocs.io/en/latest/). If you get an error, you may need to update TensorLayer.

+<!---
⚠️ This repo will be merged into [tensorlayer](https://github.com/zsdonghao/tensorlayer) soon.
+-->

### SRGAN Architecture

116 changes: 54 additions & 62 deletions main.py

Large diffs are not rendered by default.

185 changes: 70 additions & 115 deletions model.py

Large diffs are not rendered by default.

8 changes: 2 additions & 6 deletions tensorlayer/__init__.py
@@ -1,9 +1,6 @@
"""
Deep learning and Reinforcement learning library for Researchers and Engineers
"""
"""Deep learning and Reinforcement learning library for Researchers and Engineers"""
from __future__ import absolute_import


try:
    install_instr = "Please make sure you install a recent enough version of TensorFlow."
    import tensorflow
@@ -15,7 +12,6 @@
from . import files
from . import iterate
from . import layers
-from . import ops
from . import utils
from . import visualize
from . import prepro
@@ -27,7 +23,7 @@
act = activation
vis = visualize

__version__ = "1.7.3"
__version__ = "1.7.4"

global_flag = {}
global_dict = {}
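
For reference, a quick sanity check that the vendored package is the one being imported (a sketch; assumes this repo's self-contained `tensorlayer` directory is on the Python path):

>>> import tensorlayer as tl
>>> tl.__version__
'1.7.4'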
7 changes: 7 additions & 0 deletions tensorlayer/_logging.py
@@ -0,0 +1,7 @@
+import logging
+
+logging.basicConfig(level=logging.INFO, format='[TL] %(message)s')
+
+
+def info(fmt, *args):
+    logging.info(fmt, *args)
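
A minimal sketch of what the new logging helper produces (module and import path as in the diff above):

>>> from tensorlayer import _logging as tl_logging
>>> tl_logging.info("loading %s weights", "SRGAN")
[TL] loading SRGAN weights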
141 changes: 80 additions & 61 deletions tensorlayer/activation.py
@@ -3,120 +3,139 @@

import tensorflow as tf

-def identity(x, name=None):
-    """The identity activation function, Shortcut is ``linear``.
+def identity(x):
+    """The identity activation function.
+    Shortcut is ``linear``.
    Parameters
    ----------
-    x : a tensor input
-        input(s)
+    x : Tensor
+        input.
    Returns
-    --------
-        A `Tensor` with the same type as `x`.
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
    """
    return x

-# Shortcut
-linear = identity
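
For orientation, a minimal usage sketch of the identity/linear alias (assumes TensorFlow 1.x as `tf` and this TensorLayer as `tl`):

>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.constant([-2.0, 3.0])
>>> tl.act.linear(x) is x  # identity returns its input unchanged
True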

-def ramp(x=None, v_min=0, v_max=1, name=None):
+def ramp(x, v_min=0, v_max=1, name=None):
    """The ramp activation function.
    Parameters
    ----------
-    x : a tensor input
-        input(s)
+    x : Tensor
+        input.
    v_min : float
-        if input(s) smaller than v_min, change inputs to v_min
+        cap input to v_min as a lower bound.
    v_max : float
-        if input(s) greater than v_max, change inputs to v_max
-    name : a string or None
-        An optional name to attach to this activation function.
+        cap input to v_max as an upper bound.
+    name : str
+        The function name (optional).
    Returns
-    --------
-        A `Tensor` with the same type as `x`.
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
    """
    return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name)
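
A quick check of the clipping behaviour (same `tf`/`tl` assumptions as the sketch above):

>>> x = tf.constant([-1.0, 0.5, 2.0])
>>> y = tl.act.ramp(x, v_min=0, v_max=1)  # elementwise clip to [0, 1] -> [0.0, 0.5, 1.0]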

-def leaky_relu(x=None, alpha=0.1, name="lrelu"):
+def leaky_relu(x, alpha=0.1, name="lrelu"):
    """The LeakyReLU, Shortcut is ``lrelu``.
-    Modified version of ReLU, introducing a nonzero gradient for negative
-    input.
+    Modified version of ReLU, introducing a nonzero gradient for negative input.
    Parameters
    ----------
-    x : A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
-        `int16`, or `int8`.
-    alpha : `float`. slope.
-    name : a string or None
-        An optional name to attach to this activation function.
+    x : Tensor
+        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``,
+        ``int16``, or ``int8``.
+    alpha : float
+        Slope.
+    name : str
+        The function name (optional).
    Examples
-    ---------
-    >>> network = tl.layers.DenseLayer(network, n_units=100, name = 'dense_lrelu',
-    ...                                act= lambda x : tl.act.lrelu(x, 0.2))
+    --------
+    >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense')
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
    References
    ------------
-    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. (2013) <http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf>`_
+    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. (2013) <http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf>`__
    """
    # with tf.name_scope(name) as scope:
-    #     x = tf.nn.relu(x)
-    #     m_x = tf.nn.relu(-x)
-    #     x -= alpha * m_x
+    # x = tf.nn.relu(x)
+    # m_x = tf.nn.relu(-x)
+    # x -= alpha * m_x
    x = tf.maximum(x, alpha * x, name=name)
    return x

-#Shortcut
-lrelu = leaky_relu


def swish(x, name='swish'):
-    """The Swish function, see `Swish: a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941>`_.
+    """The Swish function.
+    See `Swish: a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941>`__.
    Parameters
    ----------
-    x : a tensor input
-        input(s)
+    x : Tensor
+        input.
+    name: str
+        function name (optional).
    Returns
-    --------
-        A `Tensor` with the same type as `x`.
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
    """
-    with tf.name_scope(name) as scope:
-        x = tf.nn.sigmoid(x) * x
+    with tf.name_scope(name):
+        x = tf.nn.sigmoid(x) * x
    return x
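
A numeric sketch of swish, i.e. x * sigmoid(x) (values rounded; a TF 1.x session is needed to evaluate):

>>> x = tf.constant([-1.0, 0.0, 1.0])
>>> y = tl.act.swish(x)  # -> approximately [-0.269, 0.0, 0.731]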

-def pixel_wise_softmax(output, name='pixel_wise_softmax'):
+def pixel_wise_softmax(x, name='pixel_wise_softmax'):
    """Return the softmax outputs of images; every pixel has multiple labels, and the values for each pixel sum to 1.
    Usually used for image segmentation.
    Parameters
-    ------------
-    output : tensor
-        - For 2d image, 4D tensor [batch_size, height, width, channel], channel >= 2.
-        - For 3d image, 5D tensor [batch_size, depth, height, width, channel], channel >= 2.
+    ----------
+    x : Tensor
+        input.
+        - For 2d image, 4D tensor (batch_size, height, width, channel), where channel >= 2.
+        - For 3d image, 5D tensor (batch_size, depth, height, width, channel), where channel >= 2.
+    name : str
+        function name (optional)
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
    Examples
-    ---------
+    --------
    >>> outputs = pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)
    References
-    -----------
-    - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`_
+    ----------
+    - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`__
    """
-    with tf.name_scope(name) as scope:
-        return tf.nn.softmax(output)
-    ## old implementation
-    # exp_map = tf.exp(output)
-    # if output.get_shape().ndims == 4:  # 2d image
-    #     evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, True]))
-    # elif output.get_shape().ndims == 5:  # 3d image
-    #     evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, False, True]))
-    # else:
-    #     raise Exception("output parameters should be 2d or 3d image, not %s" % str(output._shape))
-    # return tf.div(exp_map, evidence)
+    with tf.name_scope(name):
+        return tf.nn.softmax(x)


+# Alias
+linear = identity
+lrelu = leaky_relu
1 change: 1 addition & 0 deletions tensorlayer/cli/__init__.py
@@ -0,0 +1 @@
"""The tensorlayer.cli module provides a command-line tool for some common tasks."""
13 changes: 13 additions & 0 deletions tensorlayer/cli/__main__.py
@@ -0,0 +1,13 @@
+import argparse
+from tensorlayer.cli import train
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(prog='tl')
+    subparsers = parser.add_subparsers(dest='cmd')
+    train_parser = subparsers.add_parser('train', help='train a model using multiple local GPUs or CPUs.')
+    train.build_arg_parser(train_parser)
+    args = parser.parse_args()
+    if args.cmd == 'train':
+        train.main(args)
+    else:
+        parser.print_help()
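
A sketch of invoking the new entry point (the flags accepted by the `train` subcommand come from `train.build_arg_parser`, which is not shown in this diff):

$ python -m tensorlayer.cli             # no subcommand: prints the help text
$ python -m tensorlayer.cli train ...   # dispatches to train.main(args)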