# Generate a synthetic data set: 128 points in [0, 1)^2, with target
# x1 + x2 plus uniform noise drawn from [-0.05, 0.05).
rdm = RandomState(1)
X = rdm.rand(128, 2)
Y = []
for x1, x2 in X:
    noise = rdm.rand() / 10.0 - 0.05
    Y.append([x1 + x2 + noise])
# Train the model. Mini-batches are drawn cyclically from the 128 samples.
# Fix: the original mixed Python-2 print statements (`print sess.run(w1)`)
# with Python-3 print calls, which is a SyntaxError on Python 3; all prints
# are now function calls.
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 5000
    for i in range(STEPS):
        # Next mini-batch window, wrapping around the data set.
        start = (i * batch_size) % 128
        end = (i * batch_size) % 128 + batch_size
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            print("After %d training step(s), w1 is: " % (i))
            print(sess.run(w1), "\n")
    print("Final w1 is: \n", sess.run(w1))
# Recorded output of the training run above, kept verbatim as a
# module-level string (final w1 ends near [1.02, 1.04]).
''' After 0 training step(s), w1 is: [[-0.81031823] [ 1.4855988 ]] After 1000 training step(s), w1 is: [[ 0.01247112] [ 2.1385448 ]] After 2000 training step(s), w1 is: [[ 0.45567414] [ 2.17060661]] After 3000 training step(s), w1 is: [[ 0.69968724] [ 1.8465308 ]] After 4000 training step(s), w1 is: [[ 0.89886665] [ 1.29736018]] Final w1 is: [[ 1.01934695] [ 1.04280889]] '''
# Train the model (second run, same loop as above). Fix: the original mixed
# Python-2 print statements with Python-3 print calls — a SyntaxError on
# Python 3; all prints are now function calls.
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 5000
    for i in range(STEPS):
        # Next mini-batch window, wrapping around the data set.
        start = (i * batch_size) % 128
        end = (i * batch_size) % 128 + batch_size
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            print("After %d training step(s), w1 is: " % (i))
            print(sess.run(w1), "\n")
    print("Final w1 is: \n", sess.run(w1))
# Recorded output of the second training run, kept verbatim as a
# module-level string (final w1 ends near [0.955, 0.981]).
''' After 0 training step(s), w1 is: [[-0.81231821] [ 1.48359871]] After 1000 training step(s), w1 is: [[ 0.18643527] [ 1.07393336]] After 2000 training step(s), w1 is: [[ 0.95444274] [ 0.98088616]] After 3000 training step(s), w1 is: [[ 0.95574027] [ 0.9806633 ]] After 4000 training step(s), w1 is: [[ 0.95466018] [ 0.98135227]] Final w1 is: [[ 0.95525807] [ 0.9813394 ]] '''
# Define the loss function as MSE.
# Fix: tf.losses.mean_squared_error takes (labels, predictions); the original
# passed the prediction tensor `y` as labels. MSE is symmetric so the value is
# unchanged, but the API-order call keeps the code correct if this loss is
# later swapped for an asymmetric one.
loss = tf.losses.mean_squared_error(y_, y)
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
# Train the model with the MSE loss (same loop as above). Fix: the original
# mixed Python-2 print statements with Python-3 print calls — a SyntaxError
# on Python 3; all prints are now function calls.
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 5000
    for i in range(STEPS):
        # Next mini-batch window, wrapping around the data set.
        start = (i * batch_size) % 128
        end = (i * batch_size) % 128 + batch_size
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            print("After %d training step(s), w1 is: " % (i))
            print(sess.run(w1), "\n")
    print("Final w1 is: \n", sess.run(w1))
# Recorded output of the MSE training run, kept verbatim as a
# module-level string (final w1 ends near [0.974, 1.024]).
''' After 0 training step(s), w1 is: [[-0.81031823] [ 1.4855988 ]] After 1000 training step(s), w1 is: [[-0.13337609] [ 1.81309223]] After 2000 training step(s), w1 is: [[ 0.32190299] [ 1.52463484]] After 3000 training step(s), w1 is: [[ 0.67850214] [ 1.25297272]] After 4000 training step(s), w1 is: [[ 0.89473999] [ 1.08598232]] Final w1 is: [[ 0.97437561] [ 1.0243336 ]] '''
神经网络优化算法
神经网络的训练大致遵循以下过程：
以下为小批量（mini-batch）训练的代码框架：
# Mini-batch size; `n` is a per-experiment placeholder value (pseudocode).
batch_size = n
# 每次读取一小部分数据作为当前的训练数据来执行反向传播算法。 x = tf.placeholder(...) y_ = tf.placeholder(...)
# 定义神经网络结构和优化算法。 loss = ... train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
# 训练 with tf.Session() as sess: # 参数初始化 ... # 迭代跟新参数 for i in range(STEPS): # 准备 batch_size 个训练数据。一般将所有训练数据随机打乱之后再选取可以得到更好的优化效果。 current_X, current_Y = ... sess.run(train_step, feed_dict={x: current_X, y_: current_Y})
# Run full-batch training and report the MSE loss every 2000 steps.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {x: data, y_: label}
    for step in range(TRAINING_STEPS):
        sess.run(train_op, feed_dict=feed)
        if step % 2000 == 0:
            current = sess.run(mse_loss, feed_dict=feed)
            print("After %d steps, mse_loss: %f" % (step, current))
# Run full-batch training and report the loss every 2000 steps.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {x: data, y_: label}
    for step in range(TRAINING_STEPS):
        sess.run(train_op, feed_dict=feed)
        if step % 2000 == 0:
            print("After %d steps, loss: %f" % (step, sess.run(loss, feed_dict=feed)))