import tensorflow as tf
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle

# One dataset of the four bounding-box targets, one of the image file paths.
label_dataset = tf.data.Dataset.from_tensor_slices((out1_label, out2_label, out3_label, out4_label))
image_dataset = tf.data.Dataset.from_tensor_slices(images)
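This snippet assumes that scal (the square input size), images (a list of JPEG file paths), and the four label arrays were defined earlier. A minimal, hypothetical sketch of that setup, with bounding-box coordinates normalized to [0, 1] by each image's original size (the samples list and its layout are assumptions, not part of the original code):

scal = 224  # assumed input size; the prediction code at the end multiplies by 224

# Hypothetical: samples is a list of (path, xmin, ymin, xmax, ymax, width, height)
# tuples gathered from an annotation file.
images = [s[0] for s in samples]
out1_label = [s[1] / s[5] for s in samples]  # xmin / width
out2_label = [s[2] / s[6] for s in samples]  # ymin / height
out3_label = [s[3] / s[5] for s in samples]  # xmax / width
out4_label = [s[4] / s[6] for s in samples]  # ymax / height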
def read_jpg(path):
    img = tf.io.read_file(path)
    img = tf.image.decode_jpeg(img, channels=3)
    return img
def normalize(input_image):
    # Resize to the model's input size and scale pixels to [-1, 1].
    input_image = tf.image.resize(input_image, [scal, scal])
    input_image = tf.cast(input_image, tf.float32) / 127.5 - 1
    return input_image
@tf.function
def load_image(input_image_path):
    input_image = read_jpg(input_image_path)
    input_image = normalize(input_image)
    return input_image
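A quick eager sanity check on the first path (assuming images is non-empty) confirms that the pipeline yields a (scal, scal, 3) float tensor scaled to [-1, 1]:

sample = load_image(images[0])
print(sample.shape, float(tf.reduce_min(sample)), float(tf.reduce_max(sample)))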
image_dataset = image_dataset.map(load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = tf.data.Dataset.zip((image_dataset, label_dataset))
test_count = int(len(images)*0.2)
train_count = len(images) - test_count
dataset_train = dataset.skip(test_count)
dataset_test = dataset.take(test_count)
BATCH_SIZE = 8
BUFFER_SIZE = 300
STEPS_PER_EPOCH = train_count // BATCH_SIZE
VALIDATION_STEPS = test_count // BATCH_SIZE
train_dataset = dataset_train.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
test_dataset = dataset_test.batch(BATCH_SIZE)
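Before building the model, it can help to pull a single batch and verify the shapes (a sketch, not part of the original post):

for imgs, labels in train_dataset.take(1):
    print(imgs.shape)                 # (8, scal, scal, 3)
    print([l.shape for l in labels])  # four target tensors of shape (8,)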
# Xception pretrained on ImageNet serves as a frozen convolutional base.
xception = tf.keras.applications.Xception(weights='imagenet', include_top=False, input_shape=(scal, scal, 3))
xception.trainable = False
inputs = tf.keras.layers.Input(shape=(scal, scal, 3))
x = xception(inputs)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(2048, activation='relu')(x)
x = tf.keras.layers.Dense(256, activation='relu')(x)
# Four scalar regression heads, one per bounding-box coordinate.
out1 = tf.keras.layers.Dense(1)(x)
out2 = tf.keras.layers.Dense(1)(x)
out3 = tf.keras.layers.Dense(1)(x)
out4 = tf.keras.layers.Dense(1)(x)
predictions = [out1, out2, out3, out4]
model = tf.keras.models.Model(inputs=inputs, outputs=predictions)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), loss='mse', metrics=['mae'])
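Printing the architecture at this point is an optional but useful check that the Xception base is frozen and the four regression heads are wired up:

model.summary()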
EPOCHS = 50
history = model.fit(train_dataset,
                    epochs=EPOCHS,
                    steps_per_epoch=STEPS_PER_EPOCH,
                    validation_steps=VALIDATION_STEPS,
                    validation_data=test_dataset)
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(EPOCHS)
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'bo', label='Validation loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss Value')
plt.ylim([0, 1])
plt.legend()
plt.show()
model.save('detect_v1.h5')
new_model = tf.keras.models.load_model('detect_v1.h5')
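An optional check (not in the original post) that the reloaded model evaluates the same as the one just trained:

new_model.evaluate(test_dataset)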
plt.figure(figsize=(8, 24))
for img, _ in test_dataset.take(1):
    out1, out2, out3, out4 = new_model.predict(img)
    for i in range(6):
        plt.subplot(6, 1, i + 1)
        plt.imshow(tf.keras.preprocessing.image.array_to_img(img[i]))
        # Scale normalized predictions back to pixels (assumes scal == 224).
        xmin, ymin, xmax, ymax = out1[i] * 224, out2[i] * 224, out3[i] * 224, out4[i] * 224
        rect = Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False, color='red')
        ax = plt.gca()
        ax.add_patch(rect)
plt.show()