本記事では、TensorBoardを使う雛形コードを載せました。下図例はJupyterLab上で、tensorflowはv2.4.1で動作確認しています。

分析内容とそのコードの元は次のリンク先です。違いは、タイトル通りにJupyter上でインタラクティブに、そしてTensorBoardを使うようにしたことです。
hk29.hatenablog.jp
■本プログラム
# Load the TensorBoard Jupyter extension (%load_ext tensorboard) — must run
# before the %tensorboard magics used further down in this notebook.
get_ipython().run_line_magic('load_ext', 'tensorboard')
import os, glob
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array, load_img
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 18  # larger default font for all matplotlib figures
import datetime
now = datetime.datetime.now()
# Timestamp string (e.g. "210501_123456") used to name log dirs and model files
# so each run gets its own artifacts.
now = now.strftime("%y%m%d_%H%M%S")
print('tensorflow', tf.__version__)
# Ensure the TensorBoard log root exists (no-op if already present).
os.makedirs("logs", exist_ok=True)
dir_path_for_set_label = './trainingSet'
# Build (class_directory_path, integer_label) pairs: one label per
# sub-directory of the training-set root.
label_data_list = []
for dirpath, dirnames, fnames in os.walk(dir_path_for_set_label):
    # BUG FIX: the original test `len(dirpath.split(os.path.sep)) >= 2` also
    # matched the root directory itself ('./trainingSet' splits into two
    # components), registering the root as a bogus extra class with no
    # images. Label only directories below the root.
    if dirpath == dir_path_for_set_label:
        continue
    # Normalize separators for the current OS (no-op on POSIX).
    label_data_list.append((dirpath.replace('/', os.sep), len(label_data_list)))
print(label_data_list)
myclasses = len(label_data_list)
print('classes', myclasses)
test_size = 0.3      # fraction held out for the test split
img_width = 28
img_height = 28
img_ch = 3           # images are loaded as RGB (3 channels) by load_img's default
data_x = []
data_y = []
# Load every *.jpg under each labeled class directory.
for label_path, label_no in label_data_list:
    for file_path in glob.glob(label_path + '/*.jpg'):
        # BUG FIX: load_img's target_size is a 2-tuple (height, width); the
        # original passed a 3-tuple (width, height, channels), whose channel
        # entry was silently ignored and whose first two entries were in
        # (width, height) order.
        img = img_to_array(load_img(file_path, target_size=(img_height, img_width)))
        data_x.append(img)
        data_y.append(label_no)
x_np = np.asarray(data_x)
y_np = np.asarray(data_y)
# Random train/test split (default shuffle).
x_train, x_test, y_train, y_test = train_test_split(x_np, y_np, test_size=test_size)
# Scale pixel values from [0, 255] into [0, 1].
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
# One-hot encode the integer labels.
y_train = to_categorical(y_train, myclasses)
y_test = to_categorical(y_test, myclasses)
print(r'x train, y_train')
# BUG FIX: the original printed x_train.shape twice instead of y_train.shape.
print(x_train.shape, y_train.shape)
print(r'x test, y_test')
print(x_test.shape, y_test.shape)
epochs = 5
batch_size = 16
drop_rate = 0.2   # dropout probability used after pooling stages
# Small CNN: three conv/pool stages followed by a dense classifier head.
model = keras.Sequential([
    keras.layers.Conv2D(filters=32,
                        kernel_size=(3, 3),
                        padding="same",
                        activation="relu",
                        input_shape=x_train.shape[1:]),
    keras.layers.MaxPooling2D(pool_size=(2, 2)),
    keras.layers.Dropout(drop_rate),
    keras.layers.Conv2D(64, 3, padding='same', activation='relu'),
    keras.layers.MaxPooling2D(pool_size=(2, 2)),
    keras.layers.Conv2D(64, 3, padding='same', activation='relu'),
    keras.layers.MaxPooling2D(pool_size=(2, 2)),
    keras.layers.Dropout(drop_rate),
    keras.layers.Flatten(),
    keras.layers.Dense(512, activation='relu'),
    # BUG FIX: the classes are mutually exclusive and the loss is
    # categorical_crossentropy, so the output must be a softmax
    # distribution; the original's per-unit sigmoid does not normalize
    # across classes.
    keras.layers.Dense(myclasses, activation='softmax')
])
model.summary()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Per-run TensorBoard log directory, timestamped so runs do not collide.
log_dir = f"logs/scalars/{now}"
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
# Install a default summary writer so later tf.summary.scalar() calls
# (e.g. from the LR schedule) are recorded under <log_dir>/metrics.
file_writer = tf.summary.create_file_writer(f"{log_dir}/metrics")
file_writer.set_as_default()
def lr_schedule(epoch):
    """Return the learning rate for *epoch* and log it to TensorBoard.

    Step schedule: 0.2 up to epoch 10, then 0.02, 0.01 after epoch 20,
    and 0.005 after epoch 50.
    """
    # Thresholds checked from highest to lowest; first match wins.
    steps = ((50, 0.005), (20, 0.01), (10, 0.02))
    learning_rate = 0.2
    for threshold, rate in steps:
        if epoch > threshold:
            learning_rate = rate
            break
    # Record the rate so it shows up as a scalar curve in TensorBoard.
    tf.summary.scalar('learning rate', data=learning_rate, step=epoch)
    return learning_rate
lr_callback = tf.keras.callbacks.LearningRateScheduler(lr_schedule)
# Launch TensorBoard inline before training so progress can be watched live.
get_ipython().run_line_magic('tensorboard', '--logdir logs/ --bind_all --port 6006 --reload_multifile true')
# Train with a further 30% of the training data held out for validation.
history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_split=0.3,
                    callbacks=[tensorboard_callback, lr_callback],
                    )
# BUG FIX: the original `open(...).write(...)` never closed the file handle;
# use a context manager for deterministic close/flush.
with open('model_' + now + '.json', 'w') as f:
    f.write(model.to_json())
model.save_weights('model_' + now + '_weight.hdf5')
get_ipython().run_line_magic('tensorboard', '--logdir logs/')
以上
<広告>
リンク