Commit bf03475

Modification suggested by Ji'an
1 parent db35fe8 commit bf03475

18 files changed: +43 -38 lines changed

.gitignore (+1)

@@ -4,6 +4,7 @@ build
 reference
 teaching
 workspace.code-workspace
+__pycache__/
 source/.vscode
 source/_future
 source/_static/code/.idea

source/_static/code/en/extended/save_and_restore/mnist.py (+2 -2)

@@ -1,7 +1,7 @@
 import tensorflow as tf
 import numpy as np
-from model.mlp.mlp import MLP
-from model.mlp.utils import DataLoader
+from en.model.mlp.mlp import MLP
+from en.model.mlp.utils import DataLoader
 
 tf.enable_eager_execution()
 mode = 'test'

source/_static/code/en/extended/tensorboard/mnist.py (+2 -2)

@@ -1,7 +1,7 @@
 import tensorflow as tf
 import numpy as np
-from model.mlp.mlp import MLP
-from model.mlp.utils import DataLoader
+from en.model.mlp.mlp import MLP
+from en.model.mlp.utils import DataLoader
 
 tf.enable_eager_execution()
 num_batches = 10000

source/_static/code/en/model/cnn/cnn.py (+5 -5)

@@ -3,7 +3,7 @@
 
 class CNN(tf.keras.Model):
     def __init__(self):
-        super(CNN, self).__init__()
+        super().__init__()
         self.conv1 = tf.keras.layers.Conv2D(
             filters=32,             # 卷积核数目
             kernel_size=[5, 5],     # 感受野大小
@@ -18,9 +18,9 @@ def __init__(self):
             activation=tf.nn.relu
         )
         self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
-        self.flatten = tf.keras.layers.Reshape(target_shape=(-1, 7 * 7 * 64))
+        self.flatten = tf.keras.layers.Reshape(target_shape=(7 * 7 * 64,))
         self.dense1 = tf.keras.layers.Dense(units=1024, activation=tf.nn.relu)
-        self.dense2 = tf.layers.Dense(units=10)
+        self.dense2 = tf.keras.layers.Dense(units=10)
 
     def call(self, inputs):
         inputs = tf.reshape(inputs, [-1, 28, 28, 1])
@@ -30,9 +30,9 @@ def call(self, inputs):
         x = self.pool2(x)       # [batch_size, 7, 7, 64]
         x = self.flatten(x)     # [batch_size, 7 * 7 * 64]
         x = self.dense1(x)      # [batch_size, 1024]
-        x = self.dense2(x)      # [batch_size, 64]
+        x = self.dense2(x)      # [batch_size, 10]
         return x
 
     def predict(self, inputs):
-        logits = self.call(inputs)
+        logits = self(inputs)
         return tf.argmax(logits, axis=-1)
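
Note on the two behavioral fixes above: tf.keras.layers.Reshape interprets target_shape as the shape of one sample (the batch dimension is implicit), so (7 * 7 * 64,) flattens each feature map, while (-1, 7 * 7 * 64) would have added a spurious axis; and self(inputs) routes through tf.keras.Model.__call__, which performs Keras bookkeeping before delegating to call(). A minimal sketch of that calling convention, with made-up layer sizes and input shapes used purely for illustration:

import tensorflow as tf

tf.enable_eager_execution()  # TF 1.x eager mode, as used throughout this repository

class TinyModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        # target_shape describes one sample; the batch dimension is implied
        self.flatten = tf.keras.layers.Reshape(target_shape=(28 * 28,))
        self.dense = tf.keras.layers.Dense(units=10)

    def call(self, inputs):
        x = self.flatten(inputs)   # [batch_size, 784]
        return self.dense(x)       # [batch_size, 10]

    def predict(self, inputs):
        logits = self(inputs)      # goes through __call__, which invokes call()
        return tf.argmax(logits, axis=-1)

model = TinyModel()
print(model.predict(tf.zeros([2, 28, 28])))  # two predicted class indices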

source/_static/code/en/model/mlp/main.py (+2 -2)

@@ -1,7 +1,7 @@
 import tensorflow as tf
 import numpy as np
-from model.mlp.mlp import MLP
-from model.cnn.cnn import CNN
+from en.model.mlp.mlp import MLP
+from en.model.cnn.cnn import CNN
 
 tf.enable_eager_execution()
 model_type = 'CNN'

source/_static/code/en/model/mlp/mlp.py (+1 -1)

@@ -3,7 +3,7 @@
 
 class MLP(tf.keras.Model):
     def __init__(self):
-        super(MLP, self).__init__()
+        super().__init__()
         self.dense1 = tf.keras.layers.Dense(units=100, activation=tf.nn.relu)
         self.dense2 = tf.keras.layers.Dense(units=10)
 

source/_static/code/en/model/rl/rl.py (+2 -2)

@@ -18,7 +18,7 @@
 # Q-network用于拟合Q函数,和前节的多层感知机类似。输入state,输出各个action下的Q-value(CartPole下为2维)。
 class QNetwork(tf.keras.Model):
     def __init__(self):
-        super(QNetwork, self).__init__()
+        super().__init__()
         self.dense1 = tf.keras.layers.Dense(units=24, activation=tf.nn.relu)
         self.dense2 = tf.keras.layers.Dense(units=24, activation=tf.nn.relu)
         self.dense3 = tf.keras.layers.Dense(units=2)
@@ -30,7 +30,7 @@ def call(self, inputs):
         return x
 
     def predict(self, inputs):
-        q_values = self.call(inputs)
+        q_values = self(inputs)
         return tf.argmax(q_values, axis=-1)
 
 
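
The comment retained in this hunk describes the interface: the Q-network maps a state to one Q-value per action, and predict() takes the arg-max, i.e. the greedy action. A tiny NumPy sketch of that selection rule, extended with the usual epsilon-greedy exploration (the epsilon value and Q-values below are illustrative, not taken from this file):

import numpy as np

def choose_action(q_values, epsilon=0.1):
    # With probability epsilon pick a random action, otherwise act greedily.
    if np.random.rand() < epsilon:
        return np.random.randint(len(q_values))
    return int(np.argmax(q_values))   # greedy choice, as in QNetwork.predict

q = np.array([0.3, 1.2])              # Q-values for CartPole's two actions
print(choose_action(q))               # usually 1, occasionally a random action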

source/_static/code/en/model/rnn/rnn.py (+3 -3)

@@ -12,10 +12,10 @@
 
 class RNN(tf.keras.Model):
     def __init__(self, num_chars):
-        super(RNN, self).__init__()
+        super().__init__()
         self.num_chars = num_chars
         self.cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=256)
-        self.dense = tf.layers.Dense(units=self.num_chars)
+        self.dense = tf.keras.layers.Dense(units=self.num_chars)
 
     def call(self, inputs):
         batch_size, seq_length = tf.shape(inputs)
@@ -28,7 +28,7 @@ def call(self, inputs):
 
     def predict(self, inputs, temperature=1.):
         batch_size, _ = tf.shape(inputs)
-        logits = self.call(inputs)
+        logits = self(inputs)
         prob = tf.nn.softmax(logits / temperature).numpy()
         return np.array([np.random.choice(self.num_chars, p=prob[i, :])
                          for i in range(batch_size.numpy())])
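
Besides rerouting predict() through __call__, the surrounding context shows how sampling works: logits are divided by a temperature before the softmax, then one character index is drawn per batch element. A standalone NumPy sketch of that sampling step (the logits and temperatures here are invented for illustration):

import numpy as np

def sample_from_logits(logits, temperature=1.0):
    # Temperature-scaled softmax sampling, mirroring RNN.predict.
    scaled = logits / temperature
    probs = np.exp(scaled - scaled.max())   # subtract the max for numerical stability
    probs /= probs.sum()
    return np.random.choice(len(probs), p=probs)

logits = np.array([2.0, 1.0, 0.1])
print(sample_from_logits(logits, temperature=0.5))   # low temperature: mostly index 0
print(sample_from_logits(logits, temperature=2.0))   # high temperature: closer to uniform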

source/_static/code/zh/extended/save_and_restore/mnist.py (+2 -2)

@@ -1,7 +1,7 @@
 import tensorflow as tf
 import numpy as np
-from model.mlp.mlp import MLP
-from model.mlp.utils import DataLoader
+from zh.model.mlp.mlp import MLP
+from zh.model.mlp.utils import DataLoader
 
 tf.enable_eager_execution()
 mode = 'test'

source/_static/code/zh/extended/tensorboard/mnist.py (+2 -2)

@@ -1,7 +1,7 @@
 import tensorflow as tf
 import numpy as np
-from model.mlp.mlp import MLP
-from model.mlp.utils import DataLoader
+from zh.model.mlp.mlp import MLP
+from zh.model.mlp.utils import DataLoader
 
 tf.enable_eager_execution()
 num_batches = 10000

source/_static/code/zh/model/cnn/cnn.py (+5 -5)

@@ -3,7 +3,7 @@
 
 class CNN(tf.keras.Model):
     def __init__(self):
-        super(CNN, self).__init__()
+        super().__init__()
         self.conv1 = tf.keras.layers.Conv2D(
             filters=32,             # 卷积核数目
             kernel_size=[5, 5],     # 感受野大小
@@ -18,9 +18,9 @@ def __init__(self):
             activation=tf.nn.relu
         )
         self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
-        self.flatten = tf.keras.layers.Reshape(target_shape=(-1, 7 * 7 * 64))
+        self.flatten = tf.keras.layers.Reshape(target_shape=(7 * 7 * 64,))
         self.dense1 = tf.keras.layers.Dense(units=1024, activation=tf.nn.relu)
-        self.dense2 = tf.layers.Dense(units=10)
+        self.dense2 = tf.keras.layers.Dense(units=10)
 
     def call(self, inputs):
         inputs = tf.reshape(inputs, [-1, 28, 28, 1])
@@ -30,9 +30,9 @@ def call(self, inputs):
         x = self.pool2(x)       # [batch_size, 7, 7, 64]
         x = self.flatten(x)     # [batch_size, 7 * 7 * 64]
         x = self.dense1(x)      # [batch_size, 1024]
-        x = self.dense2(x)      # [batch_size, 64]
+        x = self.dense2(x)      # [batch_size, 10]
         return x
 
     def predict(self, inputs):
-        logits = self.call(inputs)
+        logits = self(inputs)
         return tf.argmax(logits, axis=-1)

source/_static/code/zh/model/mlp/main.py (+2 -2)

@@ -1,7 +1,7 @@
 import tensorflow as tf
 import numpy as np
-from model.mlp.mlp import MLP
-from model.cnn.cnn import CNN
+from zh.model.mlp.mlp import MLP
+from zh.model.cnn.cnn import CNN
 
 tf.enable_eager_execution()
 model_type = 'CNN'

source/_static/code/zh/model/mlp/mlp.py (+1 -1)

@@ -3,7 +3,7 @@
 
 class MLP(tf.keras.Model):
     def __init__(self):
-        super(MLP, self).__init__()
+        super().__init__()
         self.dense1 = tf.keras.layers.Dense(units=100, activation=tf.nn.relu)
         self.dense2 = tf.keras.layers.Dense(units=10)
 

source/_static/code/zh/model/rl/rl.py (+2 -2)

@@ -18,7 +18,7 @@
 # Q-network用于拟合Q函数,和前节的多层感知机类似。输入state,输出各个action下的Q-value(CartPole下为2维)。
 class QNetwork(tf.keras.Model):
     def __init__(self):
-        super(QNetwork, self).__init__()
+        super().__init__()
         self.dense1 = tf.keras.layers.Dense(units=24, activation=tf.nn.relu)
         self.dense2 = tf.keras.layers.Dense(units=24, activation=tf.nn.relu)
         self.dense3 = tf.keras.layers.Dense(units=2)
@@ -30,7 +30,7 @@ def call(self, inputs):
         return x
 
     def predict(self, inputs):
-        q_values = self.call(inputs)
+        q_values = self(inputs)
         return tf.argmax(q_values, axis=-1)
 
 

source/_static/code/zh/model/rnn/rnn.py (+3 -3)

@@ -12,10 +12,10 @@
 
 class RNN(tf.keras.Model):
     def __init__(self, num_chars):
-        super(RNN, self).__init__()
+        super().__init__()
         self.num_chars = num_chars
         self.cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=256)
-        self.dense = tf.layers.Dense(units=self.num_chars)
+        self.dense = tf.keras.layers.Dense(units=self.num_chars)
 
     def call(self, inputs):
         batch_size, seq_length = tf.shape(inputs)
@@ -28,7 +28,7 @@ def call(self, inputs):
 
     def predict(self, inputs, temperature=1.):
         batch_size, _ = tf.shape(inputs)
-        logits = self.call(inputs)
+        logits = self(inputs)
         prob = tf.nn.softmax(logits / temperature).numpy()
         return np.array([np.random.choice(self.num_chars, p=prob[i, :])
                          for i in range(batch_size.numpy())])

source/en/preface.rst (+1 -1)

@@ -1,7 +1,7 @@
 Preface
 =========
 
-Mar 30th, 2018, Google held the second TensorFlow Dev Summit in Mountain View, California and officially published TensorFlow version 1.8. I was fortunate to attend the summit with Google's sponsorship, witnessing this milestone new version release. Lots of new functions being added and supported shows the ambition of TensorFlow. Meanwhile Eager Execution that has been tested since 2017 fall, was finally added officially in this version and became the recommended tutorial mode to TensorFlow by official.
+On Mar 30th, 2018, Google held the second TensorFlow Dev Summit in Mountain View, California and announced the official release of TensorFlow version 1.8. I was fortunate to attend the summit with Google's sponsorship, witnessing the release of this milestone new version. Lots of new functions being added and supported shows the ambition of TensorFlow. Meanwhile, Eager Execution, which has been tested since 2017 fall, was finally included officially in this version and became the recommended mode for newcomers of TensorFlow.
 
 The easiest way to get started with TensorFlow is using Eager Execution.

source/zh/models.rst (+6 -2)

@@ -14,7 +14,7 @@ TensorFlow模型
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 .. https://www.tensorflow.org/programmers_guide/eager
 
-如上一章所述,为了增强代码的可复用性,我们往往会将模型编写为类,然后在模型调用的地方使用 ``y_pred = model(X)`` 的形式进行调用。 **模型类** 的形式非常简单,主要包含 ``__init__()`` (构造函数,初始化)和 ``call(input)`` (模型调用)两个方法,但也可以根据需要增加自定义的方法。
+如上一章所述,为了增强代码的可复用性,我们往往会将模型编写为类,然后在模型调用的地方使用 ``y_pred = model(X)`` 的形式进行调用。 **模型类** 的形式非常简单,主要包含 ``__init__()`` (构造函数,初始化)和 ``call(input)`` (模型调用)两个方法,但也可以根据需要增加自定义的方法。 [#call]_
 
 .. code-block:: python
 
@@ -39,6 +39,8 @@ TensorFlow模型
 
 如果我们需要显式地声明自己的变量并使用变量进行自定义运算,请参考 :ref:`自定义层 <custom_layer>`。
 
+.. [#call] 在Python类中,对类的实例 ``myClass`` 进行形如 ``myClass()`` 的调用等价于 ``myClass.__call__()`` 。在这里,我们的模型继承了 ``tf.keras.Model`` 这一父类。该父类中包含 ``__call__()`` 的定义,其中调用了 ``call()`` 方法,同时进行了一些keras的内部操作。这里,我们通过继承 ``tf.keras.Model`` 并重载 ``call()`` 方法,即可在保持keras结构的同时加入模型调用的代码。具体请见本章初“前置知识”的 ``__call__()`` 部分。
+
 .. _mlp:
 
 基础示例:多层感知机(MLP)
@@ -129,7 +131,7 @@ TensorFlow模型
 - LSTM原理:`Understanding LSTM Networks <https://colah.github.io/posts/2015-08-Understanding-LSTMs/>`_
 - RNN序列生成:[Graves2013]_
 
-这里,我们使用RNN来进行尼采风格文本的自动生成。
+这里,我们使用RNN来进行尼采风格文本的自动生成。 [#rnn_reference]_
 
 这个任务的本质其实预测一段英文文本的接续字母的概率分布。比如,我们有以下句子::
 
@@ -206,6 +208,8 @@ TensorFlow模型
     arn inneves to sya" natorne. hag open reals whicame oderedte,[fingo is
     zisternethta simalfule dereeg hesls lang-lyes thas quiin turjentimy; periaspedey tomm--whach
 
+.. [#rnn_reference] 此处的任务及实现参考了 https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py
+
 深度强化学习(DRL)
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
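
The new [#call] footnote explains why overriding call() is sufficient: calling an instance as myClass() is equivalent to myClass.__call__(), and tf.keras.Model defines __call__() to perform Keras-internal bookkeeping and then delegate to call(), so model(X) reaches the user's code. A plain-Python sketch of that delegation pattern (the class and method bodies below are illustrative, not the actual Keras internals):

class BaseModel:
    def __call__(self, inputs):
        # framework-side bookkeeping would happen here (building, tracking, ...)
        return self.call(inputs)

    def call(self, inputs):
        raise NotImplementedError

class Doubler(BaseModel):
    def call(self, inputs):   # only call() is overridden, as with tf.keras.Model
        return inputs * 2

model = Doubler()
print(model(21))   # __call__ -> call -> 42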

source/zh/static.rst (+1 -1)

@@ -22,7 +22,7 @@ TensorFlow本质上是一个符号式的(基于计算图的)计算框架。
     >>> b = 3
     a + b = 5
 
-变量(Variable)是一种特殊类型的张量,使用 ``tf.get_variable()`` 建立,与编程语言中的变量很相似。使用变量前需要先初始化,变量内存储的值可以在计算图的计算过程中被修改。以下示例如何建立一个变量,将其值初始化为0,并逐次累加1。
+**变量**(Variable)是一种特殊类型的张量,使用 ``tf.get_variable()`` 建立,与编程语言中的变量很相似。使用变量前需要先初始化,变量内存储的值可以在计算图的计算过程中被修改。以下示例如何建立一个变量,将其值初始化为0,并逐次累加1。
 
 .. literalinclude:: ../_static/code/zh/basic/graph/variable.py
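
The paragraph edited here describes TF 1.x variables: created with tf.get_variable(), initialized before first use, and mutable during graph execution, with the included variable.py initializing a variable to 0 and repeatedly adding 1. A minimal graph-mode sketch of that idea (this is not the repository's variable.py, just an illustration):

import tensorflow as tf

# Build the graph: a scalar variable initialized to 0 and an op that adds 1 to it.
a = tf.get_variable(name='a', shape=[], dtype=tf.int32, initializer=tf.zeros_initializer())
plus_one_op = tf.assign(a, a + 1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())   # variables must be initialized before use
    for _ in range(5):
        print(sess.run(plus_one_op))               # prints 1, 2, 3, 4, 5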
