# Train a tiny fully connected network (2 -> 3 -> 1) with TensorFlow 1.x on a
# synthetic binary-classification task: the label is 1 exactly when x1 + x2 < 1.
import tensorflow as tf
from numpy.random import RandomState

batch_size = 8

# Weight matrices of the two layers, seeded for reproducible initial values.
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))

# Placeholders: rows of 2-feature inputs and their 0/1 labels.
x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')

# Forward pass: two matrix multiplications followed by a sigmoid,
# producing a probability in (0, 1).
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
y = tf.sigmoid(y)

# Binary cross-entropy loss. clip_by_value keeps log() away from 0.
# BUG FIX: the second term must be weighted by (1 - y_) — the true label —
# not (1 - y), the prediction; the original line used (1 - y).
cross_entropy = -tf.reduce_mean(
    y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))
    + (1 - y_) * tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)

# Synthetic dataset: 128 random 2-D points with rule-based labels.
rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size, 2)
Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    print(sess.run(w1))
    print(sess.run(w2))
    # Parameter values before training:
    # w1 = [[-0.81131822, 1.48459876, 0.06532937],
    #       [-2.44270396, 0.0992484, 0.59122431]]
    # w2 = [[-0.81131822], [1.48459876], [0.06532937]]

    STEPS = 5000
    for i in range(STEPS):
        # Select the next mini-batch, wrapping around the dataset.
        start = (i * batch_size) % dataset_size
        end = min(start + batch_size, dataset_size)
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            # Periodically report the loss over the whole dataset.
            total_cross_entropy = sess.run(cross_entropy,
                                           feed_dict={x: X, y_: Y})
            print("After %d training step(s), cross entropy on all data is %g"
                  % (i, total_cross_entropy))
    # Sample output:
    #   After 0 training step(s), cross entropy on all data is 1.89805
    #   After 1000 training step(s), cross entropy on all data is 0.655075
    #   After 2000 training step(s), cross entropy on all data is 0.626172
    #   After 3000 training step(s), cross entropy on all data is 0.615096
    #   After 4000 training step(s), cross entropy on all data is 0.610309
    # The cross entropy shrinks as training proceeds; the smaller it is, the
    # closer the predictions are to the true labels.

    print(sess.run(w1))
    print(sess.run(w2))
    # Parameter values after training:
    # w1 = [[0.02476984, 0.5694868, 1.69219422],
    #       [-2.19773483, -0.23668921, 1.11438966]]
    # w2 = [[-0.45544702], [0.49110931], [-0.9811033]]
    # The parameters have changed — that change IS the training result: it
    # makes the network fit the provided training data better.