Binary file added .DS_Store
Binary file not shown.
467 changes: 467 additions & 0 deletions .ipynb_checkpoints/LV3-checkpoint.ipynb

Large diffs are not rendered by default.

526 changes: 526 additions & 0 deletions .ipynb_checkpoints/LV3_0.12-checkpoint.ipynb

Large diffs are not rendered by default.

647 changes: 647 additions & 0 deletions .ipynb_checkpoints/XOR_gate-checkpoint.ipynb

Large diffs are not rendered by default.

159 changes: 159 additions & 0 deletions .ipynb_checkpoints/core_code-checkpoint.ipynb
@@ -0,0 +1,159 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"import numpy as np\n",
"tf.set_random_seed(55)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"#构建网络\n",
"D_input = 2\n",
"D_label = 1\n",
"D_hidden = 2\n",
"lr=1e-4\n",
"\n",
"x = tf.placeholder(tf.float32, [None, D_input], name=\"x\")\n",
"t = tf.placeholder(tf.float32, [None, D_label], name=\"t\")\n",
" \n",
"W_h1 = tf.Variable(tf.truncated_normal([D_input, D_hidden], stddev=0.1), name=\"W_h\")\n",
"b_h1 = tf.Variable(tf.constant(0.1, shape=[D_hidden]), name=\"b_h\")\n",
"pre_act_h1 = tf.matmul(x, W_h1) + b_h1\n",
"act_h1 = tf.nn.relu(pre_act_h1, name='act_h')\n",
" \n",
"W_o = tf.Variable(tf.truncated_normal([D_hidden, D_label], stddev=0.1), name=\"W_o\")\n",
"b_o = tf.Variable(tf.constant(0.1, shape=[D_label]), name=\"b_o\")\n",
"pre_act_o = tf.matmul(act_h1, W_o) + b_o\n",
"y = tf.nn.relu(pre_act_o, name='act_y')\n",
" \n",
"loss=tf.reduce_mean((y-t)**2)\n",
" \n",
"train_step = tf.train.AdamOptimizer(lr).minimize(loss)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"#生成数据\n",
"X=[[0,0],[0,1],[1,0],[1,1]]\n",
"Y=[[0],[1],[1],[0]]\n",
"X=np.array(X).astype('int16')\n",
"Y=np.array(Y).astype('int16')"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
"#加载\n",
"sess = tf.InteractiveSession()\n",
"sess.run(tf.global_variables_initializer())"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
"#训练\n",
"for i in range(20000):\n",
" sess.run(train_step,feed_dict={x:X,t:Y} )"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[ 0.01419991],\n",
" [ 0.97204113],\n",
" [ 0.97204131],\n",
" [ 0.04118853]], dtype=float32)"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#计算预测值\n",
"sess.run(y,feed_dict={x:X})"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[ 0.96414495, 1.01730847],\n",
" [ 0. , 0.50864619],\n",
" [ 0. , 0.50864631],\n",
" [ 0. , 0. ]], dtype=float32)"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#查看隐藏层的输出\n",
"sess.run(act_h1,feed_dict={x:X})"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
"#用完关闭session\n",
"sess.close()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.14"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
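Note: the checkpoint above targets the TensorFlow 1.x API (placeholders, sessions). For orientation only, here is a minimal sketch of the same XOR experiment written against TensorFlow 2.x / Keras — this is not part of the PR; the layer sizes, optimizer, and step count mirror the notebook, but seeding and numerics will not reproduce the exact outputs shown.

import numpy as np
import tensorflow as tf  # assumes TensorFlow 2.x

# The XOR truth table, as in the notebook's data cell
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
Y = np.array([[0], [1], [1], [0]], dtype=np.float32)

tf.random.set_seed(55)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(2, activation="relu", input_shape=(2,)),  # hidden layer (D_hidden = 2)
    tf.keras.layers.Dense(1, activation="relu"),                    # output layer (D_label = 1)
])
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4), loss="mse")
model.fit(X, Y, epochs=20000, verbose=0)  # mirrors the 20000-iteration training loop
print(model.predict(X))  # should approach [[0], [1], [1], [0]]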
24 changes: 12 additions & 12 deletions FNN.py
@@ -66,13 +66,13 @@ def bias_init(self,shape):
def variable_summaries(self, var, name):
with tf.name_scope(name+'_summaries'):
mean = tf.reduce_mean(var)
tf.scalar_summary('mean/' + name, mean)
tf.summary.scalar('mean/' + name, mean)
with tf.name_scope(name+'_stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.scalar_summary('_stddev/' + name, stddev)
tf.scalar_summary('_max/' + name, tf.reduce_max(var))
tf.scalar_summary('_min/' + name, tf.reduce_min(var))
tf.histogram_summary(name, var)
tf.summary.scalar('_stddev/' + name, stddev)
tf.summary.scalar('_max/' + name, tf.reduce_max(var))
tf.summary.scalar('_min/' + name, tf.reduce_min(var))
tf.summary.histogram(name, var)

def layer(self,in_tensor, in_dim, out_dim, layer_name, act=tf.nn.relu):
with tf.name_scope(layer_name):
@@ -85,9 +85,9 @@ def layer(self,in_tensor, in_dim, out_dim, layer_name, act=tf.nn.relu):
self.variable_summaries(biases, layer_name + '/biases')
with tf.name_scope(layer_name+'_Wx_plus_b'):
pre_activate = tf.matmul(in_tensor, weights) + biases
tf.histogram_summary(layer_name + '/pre_activations', pre_activate)
tf.summary.histogram(layer_name + '/pre_activations', pre_activate)
activations = act(pre_activate, name='activation')
tf.histogram_summary(layer_name + '/activations', activations)
tf.summary.histogram(layer_name + '/activations', activations)
return activations, tf.nn.l2_loss(weights)

def drop_layer(self,in_tensor):
@@ -129,25 +129,25 @@ def build(self, prefix):
with tf.name_scope('total_l2'):
for l2 in self.total_l2:
self.l2_penalty+=l2
tf.scalar_summary('l2_penalty', self.l2_penalty)
tf.summary.scalar('l2_penalty', self.l2_penalty)

if self.Task_type=='regression':
with tf.name_scope('SSE'):
self.loss=tf.reduce_mean(tf.nn.l2_loss((self.output - self.labels)))
tf.scalar_summary('loss', self.loss)
tf.summary.scalar('loss', self.loss)
else:
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.output, labels=self.labels)
with tf.name_scope('cross_entropy'):
self.loss = tf.reduce_mean(entropy)
tf.scalar_summary('loss', self.loss)
tf.summary.scalar('loss', self.loss)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(self.output, 1), tf.argmax(self.labels, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.scalar_summary('accuracy', self.accuracy)
tf.summary.scalar('accuracy', self.accuracy)

with tf.name_scope('total_loss'):
self.total_loss=self.loss + self.l2_penalty*self.L2_lambda
tf.scalar_summary('total_loss', self.total_loss)
tf.summary.scalar('total_loss', self.total_loss)

#train
with tf.name_scope('train'):
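For reference, the renames applied in this file follow the TensorFlow 1.0 summary API migration; a minimal sketch of the mapping (the loss tensor below is illustrative, not from the PR):

import tensorflow as tf  # TF 1.x

# Pre-1.0 name                      ->  TF 1.0+ replacement used in this diff
# tf.scalar_summary(tag, value)     ->  tf.summary.scalar(tag, value)
# tf.histogram_summary(tag, value)  ->  tf.summary.histogram(tag, value)
# tf.merge_all_summaries()          ->  tf.summary.merge_all()
# tf.train.SummaryWriter(logdir)    ->  tf.summary.FileWriter(logdir)
loss = tf.constant(0.5)
tf.summary.scalar('loss', loss)  # note: no trailing underscore on the new name
merged = tf.summary.merge_all()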
64 changes: 33 additions & 31 deletions LV3.ipynb
@@ -11,15 +11,14 @@
"import tensorflow as tf\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"%matplotlib inline"
"%matplotlib inline\n",
"tf.reset_default_graph()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false
},
"metadata": {},
"outputs": [],
"source": [
"class FNN(object):\n",
@@ -86,14 +85,14 @@
" def variable_summaries(self, var, name):\n",
" with tf.name_scope(name+'_summaries'):\n",
" mean = tf.reduce_mean(var)\n",
" tf.scalar_summary('mean/' + name, mean)\n",
" tf.summary.scalar('mean/' + name, mean)\n",
" with tf.name_scope(name+'_stddev'):\n",
" stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n",
" # 记录每次训练后变量的数值变化\n",
" tf.scalar_summary('_stddev/' + name, stddev)\n",
" tf.scalar_summary('_max/' + name, tf.reduce_max(var))\n",
" tf.scalar_summary('_min/' + name, tf.reduce_min(var))\n",
" tf.histogram_summary(name, var)\n",
" tf.summary.scalar('_stddev/' + name, stddev)\n",
" tf.summary.scalar_('_max/' + name, tf.reduce_max(var))\n",
" tf.summary.scalar_('_min/' + name, tf.reduce_min(var))\n",
" tf.summary.histogram(name, var)\n",
"\n",
" def layer(self,in_tensor, in_dim, out_dim, layer_name, act=tf.nn.relu):\n",
" with tf.name_scope(layer_name):\n",
@@ -113,10 +112,10 @@
" # 计算Wx+b\n",
" pre_activate = tf.matmul(in_tensor, weights) + biases\n",
" # 记录直方图\n",
" tf.histogram_summary(layer_name + '/pre_activations', pre_activate)\n",
" tf.summary.histogram(layer_name + '/pre_activations', pre_activate)\n",
" # 计算a(Wx+b)\n",
" activations = act(pre_activate, name='activation')\n",
" tf.histogram_summary(layer_name + '/activations', activations)\n",
" tf.summary.histogram(layer_name + '/activations', activations)\n",
" # 最终返回该层的输出,以及权重W的L2\n",
" return activations, tf.nn.l2_loss(weights)\n",
"\n",
@@ -174,7 +173,7 @@
" with tf.name_scope('total_l2'):\n",
" for l2 in self.total_l2:\n",
" self.l2_penalty+=l2\n",
" tf.scalar_summary('l2_penalty', self.l2_penalty)\n",
" tf.summary.scalar('l2_penalty', self.l2_penalty)\n",
" \n",
" # 不同任务的loss\n",
" # 若为回归,则loss是用于判断所有预测值和实际值差别的函数。\n",
@@ -183,22 +182,22 @@
" self.loss=tf.reduce_mean((self.output-self.labels)**2)\n",
" self.loss2=tf.nn.l2_loss(self.output-self.labels)\n",
" \n",
" tf.scalar_summary('loss', self.loss)\n",
" tf.summary.scalar('loss', self.loss)\n",
" else:\n",
" # 若为分类,cross entropy的loss function\n",
" entropy = tf.nn.softmax_cross_entropy_with_logits(self.output, self.labels)\n",
" with tf.name_scope('cross entropy'):\n",
" self.loss = tf.reduce_mean(entropy)\n",
" tf.scalar_summary('loss', self.loss)\n",
" tf.summary.scalar('loss', self.loss)\n",
" with tf.name_scope('accuracy'):\n",
" correct_prediction = tf.equal(tf.argmax(self.output, 1), tf.argmax(self.labels, 1))\n",
" self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
" tf.scalar_summary('accuracy', self.accuracy)\n",
" tf.summary.scalar('accuracy', self.accuracy)\n",
" \n",
" # 整合所有loss,形成最终loss\n",
" with tf.name_scope('total_loss'):\n",
" self.total_loss=self.loss + self.l2_penalty*self.L2_lambda\n",
" tf.scalar_summary('total_loss', self.total_loss)\n",
" tf.summary.scalar('total_loss', self.total_loss)\n",
" \n",
" # 训练操作\n",
" with tf.name_scope('train'):\n",
@@ -263,9 +262,7 @@
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false
},
"metadata": {},
"outputs": [
{
"name": "stdout",
@@ -282,9 +279,7 @@
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": false
},
"metadata": {},
"outputs": [
{
"name": "stdout",
@@ -319,18 +314,16 @@
"outputs": [],
"source": [
"sess = tf.InteractiveSession()\n",
"tf.initialize_all_variables().run()\n",
"merged = tf.merge_all_summaries()\n",
"train_writer = tf.train.SummaryWriter('log3' + '/train',sess.graph)\n",
"test_writer = tf.train.SummaryWriter('log3' + '/test')"
"sess.run(tf.global_variables_initializer())\n",
"merged = tf.summary.merge_all()\n",
"train_writer = tf.summary.FileWriter('log3' + '/train',sess.graph)\n",
"test_writer = tf.summary.FileWriter('log3' + '/test')"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false
},
"metadata": {},
"outputs": [],
"source": [
"def plots(T,P,i, n=21,length=400):\n",
@@ -360,7 +353,6 @@
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": false,
"scrolled": true
},
"outputs": [
@@ -439,6 +431,16 @@
" test_writer.add_summary(summary, k)\n",
" print('epoch%s | train_loss:%s |test_loss:%s' %(i,sess.run(ff.loss,feed_dict={ff.inputs:X0,ff.labels:Y0,ff.drop_keep_rate:1.0}),sess.run(ff.loss,feed_dict={ff.inputs:X_test,ff.labels:Y_test,ff.drop_keep_rate:1.0})))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#用完关闭session\n",
"sess.close()"
]
}
],
"metadata": {
Expand All @@ -457,7 +459,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.12"
"version": "2.7.14"
}
},
"nbformat": 4,
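Beyond the summary renames, the notebook diff also updates the session setup. A minimal, self-contained sketch of the lifecycle this diff converges on, assuming TF 1.x (the variable and log directory below are illustrative):

import tensorflow as tf  # TF 1.x

tf.reset_default_graph()  # start from a clean graph, as the first notebook cell now does
w = tf.Variable(0.0, name='w')
tf.summary.scalar('w', w)
with tf.Session() as sess:  # the context manager closes the session,
                            # matching the explicit sess.close() cell added above
    sess.run(tf.global_variables_initializer())  # replaces tf.initialize_all_variables()
    merged = tf.summary.merge_all()              # replaces tf.merge_all_summaries()
    writer = tf.summary.FileWriter('log3/train', sess.graph)  # replaces tf.train.SummaryWriter
    writer.add_summary(sess.run(merged), 0)
    writer.close()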