Concise Implementation of Linear Regression with TensorFlow
- Generate the Dataset
- Reading the Dataset
- Defining the Model & Initializing Parameters
- Defining the Loss Function
- Defining the Optimization Algorithm
- Training
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + noise."""
    X = tf.random.normal(shape=(num_examples, w.shape[0]))
    y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b
    y += tf.random.normal(shape=y.shape, stddev=0.01)  # Gaussian noise, std 0.01
    return X, y
true_w = tf.constant([2, -3.4])
true_b = 4.2
X, y = synthetic_data(true_w, true_b, 1000)
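To get a feel for the data (a quick sanity check, assuming the cell above has run), inspect the shapes and plot the second feature against the labels; the generating weight -3.4 should show up as a clear downward trend.
print('X shape:', X.shape, ' y shape:', y.shape)
plt.scatter(X[:, 1], y, s=1)
plt.xlabel('X[:, 1]')
plt.ylabel('y')
plt.show()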
def load_array(data_arrays, batch_size, is_train=True):  #@save
    """Construct a TensorFlow data iterator."""
    dataset = tf.data.Dataset.from_tensor_slices(data_arrays)
    if is_train:
        dataset = dataset.shuffle(buffer_size=1000)
    dataset = dataset.batch(batch_size)
    return dataset
batch_size = 10
data_iter = load_array((X, y), batch_size)
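To confirm the iterator behaves as expected, read the first minibatch; each batch holds 10 feature rows and their 10 labels.
X_batch, y_batch = next(iter(data_iter))
print(X_batch.shape, y_batch.shape)  # (10, 2) (10, 1)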
# A single fully connected layer; weights are initialized from N(0, 0.01^2),
# and the bias defaults to zero
initializer = tf.initializers.RandomNormal(stddev=0.01)
net = tf.keras.Sequential()
net.add(tf.keras.layers.Dense(1, kernel_initializer=initializer))
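Note that Dense infers its input dimension lazily: no weights exist until the first forward pass. A quick illustrative check (calling the net on two rows triggers variable creation):
net(X[:2])  # first call builds the layer's variables
print([v.shape for v in net.weights])  # kernel (2, 1), bias (1,)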
loss = tf.keras.losses.MeanSquaredError()
# A more outlier-robust alternative would be tf.keras.losses.Huber()
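As a small worked example of the loss (hypothetical numbers, just to check the reduction): predicting 1.5 when the target is 1.0 gives a squared error of (1.0 - 1.5)^2 = 0.25, which MeanSquaredError averages over the batch.
print(loss(tf.constant([[1.0]]), tf.constant([[1.5]])).numpy())  # 0.25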
trainer = tf.keras.optimizers.SGD(learning_rate=0.03)
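Minibatch SGD updates each parameter as param <- param - lr * grad. A one-variable sketch with made-up values shows apply_gradients doing exactly this:
v = tf.Variable(1.0)
tf.keras.optimizers.SGD(learning_rate=0.03).apply_gradients([(tf.constant(0.5), v)])
print(v.numpy())  # 1.0 - 0.03 * 0.5 = 0.985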
num_epochs = 3
for epoch in range(num_epochs):
    for X_batch, y_batch in data_iter:
        with tf.GradientTape() as tape:
            l = loss(y_batch, net(X_batch, training=True))
        grads = tape.gradient(l, net.trainable_variables)
        trainer.apply_gradients(zip(grads, net.trainable_variables))
    l = loss(y, net(X))  # epoch loss over the full dataset
    print(f'epoch {epoch + 1}, loss {l:f}')
net.get_weights()
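Since the data were synthesized, the learned parameters can be compared against the generating ones; after three epochs the errors should be small (get_weights returns the kernel and bias of the single Dense layer):
w_hat, b_hat = net.get_weights()
print('error in estimating w:', true_w - tf.reshape(w_hat, true_w.shape))
print('error in estimating b:', true_b - b_hat)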
As an aside, the + b in both synthetic_data and the Dense layer relies on broadcasting: TensorFlow expands tensors with mismatched shapes, so a (3, 1) tensor plus a (1, 2) tensor yields a (3, 2) result.
a = tf.reshape(tf.range(3), (3, 1))
b = tf.reshape(tf.range(2), (1, 2))
a + b  # shape (3, 2): entry (i, j) is a[i] + b[j]