TensorFlow Study Notes: Loss Functions (Part 1)


    This note uses the commonly used mean squared error (MSE) as the loss function.
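
    MSE averages the squared difference between the label y_ and the prediction y, which is exactly what tf.reduce_mean(tf.square(y_ - y)) computes in the script below. As a quick sanity check, here is a minimal NumPy sketch with made-up values (the arrays are purely illustrative and not part of the original note):

    import numpy as np

    # Hypothetical labels and predictions, only to illustrate the formula
    # MSE = mean((y_true - y_pred) ** 2)
    y_true = np.array([[1.0], [2.0], [3.0]])
    y_pred = np.array([[1.1], [1.9], [3.2]])

    mse = np.mean(np.square(y_true - y_pred))
    print(mse)  # roughly 0.02 = (0.1**2 + 0.1**2 + 0.2**2) / 3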

    # coding:utf-8
    """
    Fit a model that predicts daily yogurt sales.
    To mimic real-world data, noise in the range -0.05 ~ +0.05 is added to y_.
    """
    # 0. Import modules and generate the dataset
    import tensorflow as tf
    import numpy as np

    BATCH_SIZE = 8
    # A fixed random SEED is not needed in real applications; it keeps the simulated
    # data identical to the MOOC lecture, which makes debugging easier
    SEED = 2345
    rdm = np.random.RandomState(SEED)
    X = rdm.rand(32, 2)
    # The simulated label y_ (the "correct answer") is the sum of the two input
    # features, plus noise
    Y_ = [[x1 + x2 + (rdm.rand() / 10.0 - 0.05)] for (x1, x2) in X]

    # 1. Define the network inputs, parameters and output, and the forward propagation
    x = tf.placeholder(tf.float32, shape=(None, 2))
    y_ = tf.placeholder(tf.float32, shape=(None, 1))
    w1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))
    y = tf.matmul(x, w1)

    # 2. Define the loss function and the back-propagation method
    # The loss function is MSE; the back-propagation method is gradient descent
    loss_mse = tf.reduce_mean(tf.square(y_ - y))
    train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss_mse)

    # 3. Create a session and train for STEPS rounds
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        STEPS = 20000
        for i in range(STEPS):
            start = (i * BATCH_SIZE) % 32
            end = start + BATCH_SIZE
            sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
            if i % 500 == 0:
                print("After %d training steps, w1 is:\n" % i)
                print(sess.run(w1))
        print("final w1 is:\n", sess.run(w1))
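
    Because y_ is generated as x1 + x2 plus roughly zero-mean noise, minimizing MSE should drive w1 toward [[1.], [1.]]. As a cross-check that is not part of the original script, the same least-squares problem can be solved in closed form with NumPy; the sketch below simply reuses the data-generation lines from above:

    import numpy as np

    SEED = 2345
    rdm = np.random.RandomState(SEED)
    X = rdm.rand(32, 2)
    Y_ = np.array([[x1 + x2 + (rdm.rand() / 10.0 - 0.05)] for (x1, x2) in X])

    # Ordinary least squares: w minimizes mean((X @ w - Y_) ** 2),
    # the same objective the gradient-descent loop above optimizes
    w, *_ = np.linalg.lstsq(X, Y_, rcond=None)
    print(w)  # expected to end up close to [[1.], [1.]]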