TensorFlow 2.0: Implementing a Neural Network's fit Function and Gradient Descent in Code

Updated: 2024-11-13

First prepare the data: fetch the California housing dataset, split it into training, validation, and test sets, and standardize the features.

```python
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
import tensorflow as tf
from tensorflow import keras
import time
import sys
import os

# Load the data
from sklearn.datasets import fetch_california_housing
housing = fetch_california_housing()
print(housing.data.shape)    # (20640, 8)
print(housing.target.shape)  # (20640,)

# Split the dataset
from sklearn.model_selection import train_test_split
x_train_all, x_test, y_train_all, y_test = train_test_split(
    housing.data, housing.target, random_state=7)
x_train, x_valid, y_train, y_valid = train_test_split(
    x_train_all, y_train_all, random_state=11)
print(x_train.shape, y_train.shape)  # (11610, 8) (11610,)
print(x_valid.shape, y_valid.shape)  # (3870, 8) (3870,)
print(x_test.shape, y_test.shape)    # (5160, 8) (5160,)

# Standardize the data: x = (x - u) / d, where u is the mean and d the standard deviation
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)
```

How a Keras metric works: each call updates internal state, so the metric automatically accumulates results across calls and averages them.

```python
metric = keras.metrics.MeanSquaredError()
print(metric([5.], [2.]))  # tf.Tensor(9.0, shape=(), dtype=float32)
print(metric([0.], [1.]))  # 5.0 -- the earlier result is accumulated: [(5-2)^2 + (0-1)^2] / 2
print(metric.result())     # print the accumulated result: 5.0
metric.reset_states()      # clear the accumulated state

# One call with both samples gives the same result as the two calls above
print(metric([5., 0.], [2., 1.]))  # [(5-2)^2 + (0-1)^2] / 2 = 5.0
metric.reset_states()

metric([1.], [3.])      # tf.Tensor(4.0, shape=(), dtype=float32)
print(metric.result())
```

Now build the model and reimplement what fit() does by hand:

1. For each batch, walk through the training set and update the training metric;
   1.1 compute the gradients by automatic differentiation and apply them.
2. At the end of each epoch, evaluate the metric on the validation set.

```python
epochs = 100
batch_size = 32
steps_per_epoch = len(x_train_scaled) // batch_size  # batches per epoch
optimizer = keras.optimizers.SGD()
metric = keras.metrics.MeanSquaredError()

# Draw a random batch from the training samples (sampling with replacement)
def random_batch(x, y, batch_size=32):
    idx = np.random.randint(0, len(x), size=batch_size)
    return x[idx], y[idx]

model = keras.models.Sequential([
    keras.layers.Dense(30, activation='relu', input_shape=x_train.shape[1:]),
    keras.layers.Dense(1),
])

for epoch in range(epochs):
    metric.reset_states()
    for step in range(steps_per_epoch):
        x_batch, y_batch = random_batch(x_train_scaled, y_train, batch_size)
        with tf.GradientTape() as tape:
            y_pred = model(x_batch)
            y_pred = tf.squeeze(y_pred, 1)  # (batch_size, 1) -> (batch_size,), to match y_batch
            loss = keras.losses.mean_squared_error(y_batch, y_pred)  # scalar: mean over the batch
        metric(y_batch, y_pred)
        grads = tape.gradient(loss, model.variables)
        grads_and_vars = zip(grads, model.variables)
        optimizer.apply_gradients(grads_and_vars)
        print("\rEpoch", epoch, " train mse:", metric.result().numpy(), end="")
    y_valid_pred = model(x_valid_scaled)
    y_valid_pred = tf.squeeze(y_valid_pred, 1)  # without the squeeze, broadcasting silently yields the wrong loss
    valid_loss = keras.losses.mean_squared_error(y_valid, y_valid_pred)  # y_true comes first
    print("\t", "valid mse: ", valid_loss.numpy())
```
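Note that random_batch samples with replacement, so within one epoch some examples are seen several times and others not at all. The more common TF2 approach is a tf.data pipeline that shuffles and batches the data so every sample is visited exactly once per epoch. A minimal sketch of that alternative (not from the original code; the stand-in arrays only mirror the shapes of x_train_scaled and y_train):

```python
import numpy as np
import tensorflow as tf

# Stand-in data with the same shapes as x_train_scaled / y_train above
x = np.random.randn(256, 8).astype("float32")
y = np.random.randn(256).astype("float32")

# Shuffle, then emit batches of 32; iterating the dataset once is one full epoch
dataset = tf.data.Dataset.from_tensor_slices((x, y)).shuffle(buffer_size=256).batch(32)

for x_batch, y_batch in dataset:
    print(x_batch.shape, y_batch.shape)  # (32, 8) (32,) -- feed these into the GradientTape step above
```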
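The hand-written loop runs eagerly, which is one reason it is slower than model.fit. Wrapping the per-batch work in tf.function traces it into a graph on the first call, so later calls skip the Python overhead. A sketch under the same model/optimizer setup as above; train_step is a helper name introduced here, not part of the original article:

```python
import tensorflow as tf
from tensorflow import keras

model = keras.models.Sequential([
    keras.layers.Dense(30, activation='relu', input_shape=(8,)),
    keras.layers.Dense(1),
])
optimizer = keras.optimizers.SGD()

@tf.function  # traced into a graph on the first call
def train_step(x_batch, y_batch):
    with tf.GradientTape() as tape:
        y_pred = tf.squeeze(model(x_batch), 1)
        loss = keras.losses.mean_squared_error(y_batch, y_pred)
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(zip(grads, model.variables))
    return loss

# One step on random data, just to show the call
print(train_step(tf.random.normal((32, 8)), tf.random.normal((32,))).numpy())
```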
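For comparison, the loop above is what model.compile plus model.fit do internally: batching, the forward pass, backpropagation, metric tracking, and validation at the end of each epoch. A minimal equivalent, assuming the data prepared earlier:

```python
# Same architecture as the manual loop
model = keras.models.Sequential([
    keras.layers.Dense(30, activation='relu', input_shape=x_train.shape[1:]),
    keras.layers.Dense(1),
])
model.compile(loss='mean_squared_error', optimizer=keras.optimizers.SGD())

# fit handles everything the manual loop did and reports train/valid mse per epoch
history = model.fit(x_train_scaled, y_train,
                    validation_data=(x_valid_scaled, y_valid),
                    epochs=100, batch_size=32)
```

The numbers will not match the manual loop exactly, since fit iterates the shuffled training set without replacement rather than drawing random batches.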
Author: WANGBINLONG-



Tags: gradient descent · gradient · tensorflow
