MNIST

Table of Contents

1. MNIST

import tensorflow as tf
import numpy as np

from tensorflow.keras.datasets import mnist
from tensorflow.keras import layers,losses,metrics,optimizers, models

# load data

1.1. simple

# Baseline: keep integer digit labels and train with sparse categorical
# cross-entropy, letting Keras handle batching and the training loop.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

tf.keras.backend.clear_session()

# Flatten each 28x28 image, one hidden ReLU layer, softmax over the 10 digits.
model = models.Sequential([
    layers.Flatten(input_shape=(28, 28)),
    layers.Dense(100, activation="relu"),
    layers.Dense(10, activation="softmax"),
])

model.compile(
    optimizer="adam",
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)

model.fit(x_train, y_train, batch_size=200, epochs=10, verbose=0)

model.evaluate(x_test, y_test, verbose=2)

313/313 - 0s - loss: 0.3964 - accuracy: 0.9426

1.2. one-hot

# Variant: one-hot encode the labels and train with (dense) categorical
# cross-entropy; images are pre-flattened so Flatten is not needed.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

tf.keras.backend.clear_session()

# Reshape images to 784-vectors and one-hot the digit labels (depth 10).
x_train = tf.reshape(x_train, (-1, 28 * 28))
y_train = tf.one_hot(y_train, depth=10)

x_test = tf.reshape(x_test, (-1, 28 * 28))
y_test = tf.one_hot(y_test, depth=10)

model = models.Sequential([
    layers.Dense(100, input_shape=(28 * 28,), activation="relu"),
    layers.Dense(10, activation="softmax"),
])

model.compile(
    optimizer="adam",
    loss="categorical_crossentropy",
    metrics=["accuracy"],
)
model.fit(x_train, y_train, batch_size=200, epochs=10, verbose=0)

model.evaluate(x_test, y_test, verbose=2)

313/313 - 0s - loss: 0.3311 - accuracy: 0.9362

1.3. manually fit

# Manual training loop: replaces model.fit() with an explicit
# GradientTape-based loop over a batched tf.data pipeline.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

tf.keras.backend.clear_session()

# Reshape images to 784-vectors and one-hot the digit labels (depth 10).
x_train = tf.reshape(x_train, (-1, 28 * 28))
y_train = tf.one_hot(y_train, depth=10)

x_test = tf.reshape(x_test, (-1, 28 * 28))
y_test = tf.one_hot(y_test, depth=10)

model = models.Sequential([
    layers.Dense(100, input_shape=(28 * 28,), activation="relu"),
    layers.Dense(10, activation="softmax"),
])

loss_func = tf.keras.losses.CategoricalCrossentropy()
evaluator = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')

optimizer = tf.keras.optimizers.Adam()

train_dataset = tf.data.Dataset.from_tensor_slices(
    (x_train, y_train)).batch(200)

for epoch in range(20):
    for x, y in train_dataset:
        # Record only the forward pass and the loss on the tape.
        with tf.GradientTape() as tape:
            y_hat = model(x)
            loss = loss_func(y, y_hat)
        # Fix: differentiate and apply the update OUTSIDE the tape context.
        # The original did both inside the `with`, which needlessly records
        # the backward pass onto the (non-persistent) tape.
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

# Accumulate accuracy over the whole test set in one forward pass.
evaluator(y_test, model(x_test))
print(evaluator.result())

tf.Tensor(0.955, shape=(), dtype=float32)

Author: [email protected]
Date: 2020-07-27 Mon 00:00
Last updated: 2020-07-29 Wed 18:32

知识共享许可协议 (Creative Commons License)