The entire model construction is shown in the code below, with explanatory comments.
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
# so that figures are displayed inline in the notebook
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras
from sklearn.datasets import fetch_california_housing # load the California housing dataset from sklearn
housing = fetch_california_housing()
print(housing.DESCR)
print(housing.data.shape)
print(housing.target.shape)
# use train_test_split to split the dataset
# test_size controls the split ratio; the default split is 3:1 (train : test)
from sklearn.model_selection import train_test_split
# split the dataset: this returns the full training set and the test set
x_train_all, x_test, y_train_all, y_test = train_test_split(housing.data, housing.target, random_state = 1)
# split the full training set again into a training set and a validation set
x_train, x_valid, y_train, y_valid = train_test_split(x_train_all, y_train_all, random_state=2)
print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
print(x_test.shape, y_test.shape)
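# With the default splits, the 20640 samples and 8 features of the dataset should
# yield shapes of roughly (11610, 8) for training, (3870, 8) for validation and
# (5160, 8) for the test set.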
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# standardize the data
# transform expects a 2-D array; the housing features are already 2-D ([None, 8]),
# so no reshaping is needed here (unlike image data, e.g. [None, 28, 28] -> [None, 784])
# for the difference between fit_transform and transform, see my TensorFlow blog posts
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)
# note: after standardization, all subsequent steps must use the scaled data
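# Sanity check (optional): StandardScaler keeps the statistics learned from x_train,
# and transform reuses them, so the validation and test sets are scaled with the
# training-set mean and standard deviation.
print(scaler.mean_)   # per-feature means learned from x_train
print(scaler.scale_)  # per-feature standard deviations learned from x_train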
Building the wide & deep model
# build the wide & deep model with the functional API
# input layer
input = keras.layers.Input(shape = x_train.shape[1:])
# deep part: two fully connected hidden layers
hidden1 = keras.layers.Dense(30, activation='relu')(input)
hidden2 = keras.layers.Dense(30, activation= 'relu')(hidden1)
# wide part: concatenate the raw input with the output of the deep part
concat = keras.layers.concatenate([input, hidden2])
# output layer
output = keras.layers.Dense(1)(concat)
# assemble the Model from its inputs and outputs
model = keras.models.Model(inputs = [input],
outputs = [output])
model.summary()
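# With 8 input features and 30 units in hidden2, the concatenate layer's output
# shape should be (None, 38), and the final Dense(1) layer should have 39
# parameters (38 weights + 1 bias); this can be checked against the summary above.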
# compile the model
model.compile(loss = "mean_squared_error", # loss function: mean squared error
              optimizer = "adam",          # optimizer
              )
# callbacks: EarlyStopping stops training when val_loss has not improved by more than min_delta for patience epochs
callbacks = [
keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),
]
# train the model; the per-epoch results are returned and stored in history
history = model.fit(x_train_scaled, y_train, epochs=50,
validation_data=(x_valid_scaled, y_valid),
callbacks=callbacks) # pass in the callbacks
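# history.history is a dict keyed by metric name; with the settings above it holds
# the per-epoch training 'loss' and validation 'val_loss', which the helper below
# turns into a learning-curve plot.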
def plot_learning_curves(history):
    pd.DataFrame(history.history).plot(figsize=(8,5))
    plt.grid(True)
    plt.gca().set_ylim(0,1)
    plt.show()
plot_learning_curves(history)
model.evaluate(x_test_scaled, y_test)
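# With the trained model, predictions can be made on the scaled test features;
# a minimal example (the first three test samples are used purely for illustration):
y_pred = model.predict(x_test_scaled[:3])
print(y_pred.ravel())  # predicted median house values, in units of $100,000
print(y_test[:3])      # the corresponding true values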