1. Building a layer-adding function for a neural network
The function takes the input values, the input size, the output size, and an activation function.
Anyone who has studied neural networks will understand the following right away; if not, see my other blog post (http://www.cnblogs.com/wjy-lulu/p/6547542.html).
def add_layer(inputs, in_size, out_size, activate=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # random initialization
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)  # biases may be random too, but must not all be zero; a small constant works well
    y = tf.matmul(inputs, Weights) + biases  # matmul is matrix multiplication; multiply is element-wise
    if activate:
        y = activate(y)
    return y
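As a quick usage sketch (the tensor names below are illustrative, not from the original post), two calls chain a 1→10→1 network:

import tensorflow as tf

x_in = tf.placeholder(tf.float32, [None, 1])            # a batch of 1-D samples (illustrative name)
hidden = add_layer(x_in, 1, 10, activate=tf.nn.relu)    # hidden layer: 1 input -> 10 units, ReLU
output = add_layer(hidden, 10, 1, activate=None)        # output layer: 10 -> 1, linear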
2. Fitting a quadratic function
import tensorflow as tf
import numpy as np

def add_layer(inputs, in_size, out_size, activate=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # random initialization
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)  # not all zeros; a small constant works well
    y = tf.matmul(inputs, Weights) + biases  # matmul is matrix multiplication; multiply is element-wise
    if activate:
        y = activate(y)
    return y

if __name__ == '__main__':
    x_data = np.linspace(-1, 1, 300, dtype=np.float32)[:, np.newaxis]  # 300 points in [-1, 1]; np.newaxis turns the (300,) vector into a (300, 1) column
    noise = np.random.normal(0, 0.05, x_data.shape).astype(np.float32)  # Gaussian noise, mean 0, std 0.05, same (300, 1) shape
    y_data = np.square(x_data) - 0.5 + noise  # a parabola with noise added
    xs = tf.placeholder(tf.float32, [None, 1])  # externally fed input data
    ys = tf.placeholder(tf.float32, [None, 1])
    l1 = add_layer(xs, 1, 10, activate=tf.nn.relu)
    prediction = add_layer(l1, 10, 1, activate=None)
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))  # mean squared error
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # gradient descent on the loss, learning rate 0.1
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})  # train
        if i % 50 == 0:
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))  # inspect the loss
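For reference, the printed loss should drop quickly and then level off roughly around the variance of the injected noise, 0.05² = 0.0025, since the noise itself cannot be fit; a value that diverges or stalls far above that suggests the learning rate or the network is off.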
3. Displaying the training process dynamically
Only the display-related parts of the program are explained here; for the rest, see another blog post (http://www.cnblogs.com/wjy-lulu/p/7735987.html).
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

def add_layer(inputs, in_size, out_size, activate=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # random initialization
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)  # not all zeros; a small constant works well
    y = tf.matmul(inputs, Weights) + biases  # matmul is matrix multiplication; multiply is element-wise
    if activate:
        y = activate(y)
    return y

if __name__ == '__main__':
    x_data = np.linspace(-1, 1, 300, dtype=np.float32)[:, np.newaxis]  # 300 points in [-1, 1] as a (300, 1) column
    noise = np.random.normal(0, 0.05, x_data.shape).astype(np.float32)  # Gaussian noise, mean 0, std 0.05
    y_data = np.square(x_data) - 0.5 + noise  # a parabola with noise added
    fig = plt.figure('show_data')  # plt.figure("name") names the figure window
    ax = fig.add_subplot(111)
    ax.scatter(x_data, y_data)
    plt.ion()  # interactive mode, so plt.show() does not block
    plt.show()
    xs = tf.placeholder(tf.float32, [None, 1])  # externally fed input data
    ys = tf.placeholder(tf.float32, [None, 1])
    l1 = add_layer(xs, 1, 10, activate=tf.nn.relu)
    prediction = add_layer(l1, 10, 1, activate=None)
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))  # mean squared error
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # gradient descent, learning rate 0.1
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})  # train
        if i % 50 == 0:
            try:
                ax.lines.remove(lines[0])  # remove the previous fitted curve; fails harmlessly on the first pass, before lines exists
            except Exception:
                pass
            prediction_value = sess.run(prediction, feed_dict={xs: x_data})
            lines = ax.plot(x_data, prediction_value, "r", lw=3)  # draw the current fit in red
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))  # inspect the loss
            plt.pause(2)
    while True:
        plt.pause(0.01)  # keep the window responsive after training ends
4. Visualizing the overall graph structure with TensorBoard
A. Use with tf.name_scope("name") to create the large structures, and the name="name" argument of individual ops to create the small ones: tf.placeholder(tf.float32, [None, 1], name="x_data")
B. Use writer = tf.summary.FileWriter("G:/test/", graph=sess.graph) to write a graph file
C. Use TensorBoard to serve that file (a minimal sketch follows below)
Note: first change into the directory one level above the folder holding the file, then run it:
tensorboard --logdir=test
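As a minimal standalone sketch of steps A and B (the scope name and the logdir path are illustrative; point the path at your own disk):

import tensorflow as tf

with tf.name_scope("inputs"):  # A: a named scope, shown as one collapsible node in the graph view
    x = tf.placeholder(tf.float32, [None, 1], name="x_data")  # name= labels the individual op
sess = tf.Session()
writer = tf.summary.FileWriter("G:/test/", graph=sess.graph)  # B: dump the graph definition
writer.close()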
5. Visualizing local structure with TensorBoard
A. tf.summary.histogram(layer_name+"Weight", Weights): histogram display
B. tf.summary.scalar("Loss", loss): line-chart display; the trend of the loss tells you whether the network is training well, so this one is crucial
C. Initialize and run the configured summaries:
merge = tf.summary.merge_all()  # merge all registered summaries into a single op
writer = tf.summary.FileWriter("G:/test/", graph=sess.graph)  # write the graph to file
result = sess.run(merge, feed_dict={xs: x_data, ys: y_data})  # evaluate the merged summary op
writer.add_summary(result, i)  # append the result to the file at step i
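The point of merge_all is that every histogram and scalar registered earlier becomes part of one op, so a single sess.run fetch collects them all; add_summary then stamps the result with the step index i, which is what lets TensorBoard plot each quantity against training time.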
Complete code and the resulting display:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

def add_layer(inputs, in_size, out_size, n_layer=1, activate=None):
    layer_name = "layer" + str(n_layer)
    with tf.name_scope(layer_name):
        with tf.name_scope("Weights"):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name="W")  # random initialization
            tf.summary.histogram(layer_name + "Weight", Weights)
        with tf.name_scope("Biases"):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name="B")  # not all zeros; a small constant works well
            tf.summary.histogram(layer_name + "Biases", biases)
        y = tf.matmul(inputs, Weights) + biases  # matmul is matrix multiplication; multiply is element-wise
        if activate:
            y = activate(y)
        tf.summary.histogram(layer_name + "y_sum", y)
        return y

if __name__ == '__main__':
    x_data = np.linspace(-1, 1, 300, dtype=np.float32)[:, np.newaxis]  # 300 points in [-1, 1] as a (300, 1) column
    noise = np.random.normal(0, 0.05, x_data.shape).astype(np.float32)  # Gaussian noise, mean 0, std 0.05
    y_data = np.square(x_data) - 0.5 + noise  # a parabola with noise added
    fig = plt.figure('show_data')  # plt.figure("name") names the figure window
    ax = fig.add_subplot(111)
    ax.scatter(x_data, y_data)
    plt.ion()  # interactive mode, so plt.show() does not block
    plt.show()
    with tf.name_scope("inputs"):
        xs = tf.placeholder(tf.float32, [None, 1], name="x_data")  # externally fed input data
        ys = tf.placeholder(tf.float32, [None, 1], name="y_data")
    l1 = add_layer(xs, 1, 10, n_layer=1, activate=tf.nn.relu)
    prediction = add_layer(l1, 10, 1, n_layer=2, activate=None)
    with tf.name_scope("loss"):
        loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))  # mean squared error
        tf.summary.scalar("Loss", loss)
    with tf.name_scope("train_step"):
        train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # gradient descent, learning rate 0.1
    sess = tf.Session()
    merge = tf.summary.merge_all()  # merge all summaries into one op
    writer = tf.summary.FileWriter("G:/test/", graph=sess.graph)
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})  # train
        if i % 100 == 0:
            result = sess.run(merge, feed_dict={xs: x_data, ys: y_data})  # run the merged summary op
            writer.add_summary(result, i)  # write to file, once every 100 steps
Original title: TensorFlow學(xué)習(xí)之神經(jīng)網(wǎng)絡(luò)的構(gòu)建
Source: WeChat public account AI_shequ (人工智能愛(ài)好者社區(qū)). Please credit the source when reposting.