# import the necessary packages
import matplotlib
matplotlib.use("Agg")  # non-interactive backend so figures can be saved headless
import matplotlib.pyplot as plt

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import SeparableConv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import LambdaCallback
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
# from imutils import paths
import numpy as np
import pandas as pd
import argparse
import cv2
import os
import sys
import re
import tempfile
from PIL import Image
# Let TensorFlow grow GPU memory on demand instead of grabbing it all up front
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
assert tf.config.experimental.get_memory_growth(physical_devices[0])

from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())

from tensorflow.keras import backend as K
def mcor(y_true, y_pred):
    # Matthews correlation coefficient, computed batch-wise
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos
    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)
    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)
    numerator = (tp * tn - fp * fn)
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return numerator / (denominator + K.epsilon())
def precision(y_true, y_pred):
    """Precision metric.

    Only computes a batch-wise average of precision, a metric for
    multi-label classification of how many selected items are relevant.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def recall(y_true, y_pred):
    """Recall metric.

    Only computes a batch-wise average of recall, a metric for
    multi-label classification of how many relevant items are selected.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

def f1(y_true, y_pred):
    """Batch-wise F1 score: the harmonic mean of precision and recall."""
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))
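# --- Hedged sanity check (illustrative only, not part of the original
# training flow): under TF2 eager execution the batch-wise metrics above
# should agree with sklearn on a small hand-made batch. The values below
# are made up for demonstration.
from sklearn.metrics import precision_score

_y_true = K.constant([1.0, 0.0, 1.0, 1.0])
_y_pred = K.constant([0.9, 0.2, 0.4, 0.8])
print("precision (backend):", float(precision(_y_true, _y_pred)))        # ~1.0
print("precision (sklearn):", precision_score([1, 0, 1, 1], [1, 0, 0, 1]))  # 1.0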
# Split proportions between training and test
TRAIN_SPLIT = 0.75
TEST_SPLIT = 0.25
# Learning rate and training schedule
INIT_LR = 1e-7
BATCH_SIZE = 8
NUM_EPOCHS = 100
# Image size as (height, width), so it matches the model input shape below
image_size = (768, 1024)
# Must agree with the loss function: the model ends in a two-unit softmax,
# so the generators need one-hot ("categorical") labels
class_mode = "categorical"
# Image generators: augmented for training/validation, plain for test
image_generator = ImageDataGenerator(rotation_range=30,
                                     zoom_range=0.15,
                                     width_shift_range=0.2,
                                     height_shift_range=0.2,
                                     shear_range=0.15,
                                     validation_split=0,
                                     horizontal_flip=True,
                                     fill_mode="nearest")
image_generatorCLASSIC = ImageDataGenerator(rotation_range=30,
                                            zoom_range=0,
                                            width_shift_range=0,
                                            height_shift_range=0,
                                            shear_range=0,
                                            validation_split=0,
                                            horizontal_flip=True,
                                            fill_mode="nearest")
#Location where the data is, in fire/nonfire sub-directories
dataDirectoryTrain = "/userdata/kerasData/preloaded/flowDirectory4/train/"
dataDirectoryValidation = "/userdata/kerasData/preloaded/flowDirectory4/validation/"
dataDirectoryTest = "/userdata/kerasData/preloaded/flowDirectory4/test/"
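# Expected layout for flow_from_directory (one sub-directory per class); a
# sketch based on the fire/nonfire naming mentioned above:
#
#   flowDirectory4/train/fire/*.jpg
#   flowDirectory4/train/nonfire/*.jpg
#   flowDirectory4/validation/{fire,nonfire}/*.jpg
#   flowDirectory4/test/{fire,nonfire}/*.jpg
#
# Keras assigns class indices alphabetically, so fire -> 0 and nonfire -> 1.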
# Flow images from the class sub-directories
trainingGeneratorHPWREN = image_generator.flow_from_directory(
    dataDirectoryTrain,
    target_size=image_size,
    seed=42,
    batch_size=BATCH_SIZE,
    class_mode=class_mode)
validationGeneratorHPWREN = image_generator.flow_from_directory(
    dataDirectoryValidation,
    target_size=image_size,
    batch_size=BATCH_SIZE,
    seed=42,
    class_mode=class_mode)
testGeneratorHPWREN = image_generatorCLASSIC.flow_from_directory(
    dataDirectoryTest,
    target_size=image_size,
    batch_size=BATCH_SIZE,
    seed=42,
    class_mode=class_mode)
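# Quick sanity check (illustrative, not part of the original flow): one
# training batch should match the model input of (BATCH_SIZE, 768, 1024, 3)
# and, with class_mode "categorical", one-hot labels of shape (BATCH_SIZE, 2).
_images, _labels = next(trainingGeneratorHPWREN)
print(_images.shape, _labels.shape)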
# Define the model here
class FireDetectionNet:
    @staticmethod
    def build(width, height, depth):
        # initialize the model along with the input shape to be
        # "channels last" and the channels dimension itself
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1

        # first set of CONV => RELU => BN => POOL layers
        model.add(SeparableConv2D(32, (7, 7), padding="same",
                                  input_shape=inputShape))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(SeparableConv2D(64, (5, 5), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(SeparableConv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))

        model.add(SeparableConv2D(128, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(5, 5)))

        # first set of FC => RELU layers
        model.add(Flatten())
        model.add(Dense(64))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # second set of FC => RELU layers
        model.add(Dense(128))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # softmax classifier
        model.add(Dense(2))
        model.add(Activation("softmax"))

        # return the constructed network architecture
        return model
# This name is really important because the model history and structure are indexed by it
# name = "HPWRENGroundUp_1024_SPLIT2_v1_e3"
name = "TEST"

# Parameters and model initialization
opt = SGD(learning_rate=INIT_LR, momentum=0.9,
          decay=INIT_LR / NUM_EPOCHS)
groundUpModel = FireDetectionNet.build(width=1024, height=768, depth=3)

# Compile the model; categorical cross-entropy matches the two-unit softmax
# head and the "categorical" class_mode of the generators above
groundUpModel.compile(loss="categorical_crossentropy", optimizer=opt,
                      metrics=["accuracy", precision, recall, f1])

mc = tf.keras.callbacks.ModelCheckpoint(f'/userdata/kerasData/pyimagesearch/output/experimental/{name}HPWREN.model',
                                        monitor='val_loss', mode='auto', save_freq='epoch', verbose=1)
early_stopping_callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=50)
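# Optional (not in the original script): print the layer-by-layer
# architecture and parameter counts before training starts
groundUpModel.summary()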
# Train the model. len(generator) is already the number of batches per
# epoch, so it is used directly; dividing it by BATCH_SIZE again would
# train on only a fraction of the data each epoch.
history = groundUpModel.fit(
    trainingGeneratorHPWREN,
    validation_data=validationGeneratorHPWREN,
    steps_per_epoch=len(trainingGeneratorHPWREN),
    validation_steps=len(validationGeneratorHPWREN),
    epochs=NUM_EPOCHS,
    callbacks=[mc, early_stopping_callback],
    verbose=1
)
# Save the training history to CSV, indexed by the run name
history_df = pd.DataFrame(history.history)
hist_csv_file = f"/userdata/kerasData/output/recreate/{name}"
with open(hist_csv_file, mode='w') as f:
    history_df.to_csv(f)
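# matplotlib is imported with the Agg backend above but never used; a minimal
# sketch (assuming the same output directory as the CSV exists) to save the
# loss curves alongside the history:
plt.figure()
plt.plot(history_df["loss"], label="train loss")
plt.plot(history_df["val_loss"], label="val loss")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.savefig(f"/userdata/kerasData/output/recreate/{name}_loss.png")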
To use this demo, go to the provided notebook and enter the correct paths to the model (the .h5 file) and to the image directory. Everything else should be provided.
%% Cell type:code id: tags:
``` python
import tensorflow as tf
import os
from PIL import Image
import numpy as np
import cv2
import re
```
%% Cell type:code id: tags:
``` python
tf.test.is_built_with_cuda()
tf.test.is_gpu_available(cuda_only=False, min_cuda_compute_capability=None)
```
%% Output
True
%% Cell type:code id: tags:
``` python
test = np.array([1,2,3,4])
test/4
```
%% Output
array([0.25, 0.5 , 0.75, 1. ])
%% Cell type:code id: tags:
``` python
pathToModel = "/firstSplitmodel.h5"
pathToImageDir = "/images/setForLk"
```
%% Cell type:code id: tags:
``` python
def loadData(datasetPath):
    tempF = []
    fireLabel = []
    for element in os.listdir(datasetPath):
        # Filenames containing "+" are fire images; the rest are non-fire
        if re.search(r'\+', element):
            fireLabel.append(1)
        else:
            fireLabel.append(0)
        image = cv2.imread(datasetPath + "/" + element)
        image = cv2.resize(image, (128, 128))
        tempF.append(image)
    labels = np.array(fireLabel)
    labels = tf.keras.utils.to_categorical(labels, num_classes=2)
    data = np.array(tempF)
    data = np.true_divide(data, 255)  # scale pixel values into [0, 1]
    return data, labels

data, Label = loadData(pathToImageDir)
```
%% Cell type:code id: tags:
``` python
model = tf.keras.models.load_model(pathToModel)
```
%% Cell type:code id: tags:
``` python
model.predict(data)
```
%% Output
array([[0.5107457 , 0.4892543 ],
[0.41123033, 0.5887697 ],
[0.43183708, 0.56816286],
[0.44897774, 0.55102223],
[0.47739947, 0.52260053],
[0.3578842 , 0.6421158 ],
[0.4941746 , 0.5058254 ],
[0.34056282, 0.6594372 ],
[0.35624158, 0.64375836],
[0.47470272, 0.5252973 ],
[0.4808885 , 0.5191116 ],
[0.431317 , 0.568683 ],
[0.5134212 , 0.4865788 ],
[0.40128127, 0.59871876],
[0.4639625 , 0.53603756],
[0.35375822, 0.64624184],
[0.36264202, 0.63735795],
[0.45890424, 0.5410958 ],
[0.33185333, 0.66814667],
[0.3523977 , 0.6476023 ],
[0.34115818, 0.6588418 ],
[0.48866385, 0.5113361 ],
[0.455409 , 0.544591 ],
[0.46895435, 0.5310457 ],
[0.38263088, 0.6173691 ],
[0.5233783 , 0.47662166],
[0.44299665, 0.5570033 ],
[0.47639135, 0.5236086 ],
[0.3761411 , 0.6238589 ],
[0.47419727, 0.52580273],
[0.40086877, 0.5991312 ],
[0.48351654, 0.5164834 ],
[0.43688735, 0.5631126 ],
[0.4520515 , 0.5479485 ],
[0.3890876 , 0.6109124 ],
[0.4929448 , 0.50705516],
[0.35062018, 0.64937985],
[0.46544933, 0.53455067],
[0.4987746 , 0.5012254 ],
[0.4678061 , 0.53219396],
[0.468355 , 0.53164506],
[0.37291136, 0.62708867],
[0.5106242 , 0.4893759 ],
[0.4318202 , 0.5681798 ],
[0.39998996, 0.60001004],
[0.40117145, 0.59882855],
[0.45087582, 0.5491242 ],
[0.3823646 , 0.6176354 ],
[0.3389846 , 0.6610154 ],
[0.36759573, 0.63240427],
[0.4042852 , 0.5957148 ],
[0.39802316, 0.6019768 ],
[0.34497583, 0.6550242 ],
[0.4933747 , 0.5066253 ],
[0.4834825 , 0.51651746],
[0.43642092, 0.5635791 ],
[0.47396407, 0.52603596],
[0.4887493 , 0.5112507 ],
[0.47656032, 0.52343965],
[0.46176285, 0.53823715],
[0.37929055, 0.6207095 ],
[0.49937135, 0.50062865],
[0.48522255, 0.5147774 ],
[0.45080933, 0.5491907 ],
[0.46133736, 0.5386627 ],
[0.39081547, 0.60918456],
[0.41144982, 0.58855015],
[0.52494085, 0.47505915],
[0.38156646, 0.61843354],
[0.36862728, 0.6313727 ],
[0.4544768 , 0.5455232 ],
[0.34168276, 0.6583172 ],
[0.37399516, 0.6260049 ],
[0.47957534, 0.52042466],
[0.50483143, 0.49516863],
[0.4877852 , 0.5122148 ],
[0.42307466, 0.57692534],
[0.51999307, 0.4800069 ],
[0.39840838, 0.60159165],
[0.4879313 , 0.5120687 ],
[0.4254754 , 0.57452464]], dtype=float32)
%% Cell type:code id: tags:
``` python
print(model.evaluate(x=data, y=Label))
print(model.metrics_names)
```
%% Output
81/1 [==============================] - 0s 588us/sample - loss: 0.5996 - accuracy: 0.5926
[0.6112031642301583, 0.5925926]
['loss', 'accuracy']
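%% Cell type:code id: tags:

``` python
# Hedged follow-up (not in the original notebook): turn the softmax
# probabilities into hard class predictions and compare them against the
# labels produced by loadData above.
preds = np.argmax(model.predict(data), axis=1)
truth = np.argmax(Label, axis=1)
print("argmax accuracy:", np.mean(preds == truth))
```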
%% Cell type:code id: tags:
``` python
```
Files added:

wildfireDemo/setForLk/1512671584_-02400.jpg (360 KiB)
wildfireDemo/setForLk/1512671644_-02340.jpg (360 KiB)
wildfireDemo/setForLk/1512671704_-02280.jpg (362 KiB)
wildfireDemo/setForLk/1512671764_-02220.jpg (360 KiB)
wildfireDemo/setForLk/1512671824_-02160.jpg (356 KiB)
wildfireDemo/setForLk/1512671884_-02100.jpg (369 KiB)
wildfireDemo/setForLk/1512671944_-02040.jpg (357 KiB)
wildfireDemo/setForLk/1512672004_-01980.jpg (372 KiB)
wildfireDemo/setForLk/1512672064_-01920.jpg (355 KiB)
wildfireDemo/setForLk/1512672124_-01860.jpg (366 KiB)
wildfireDemo/setForLk/1512672184_-01800.jpg (356 KiB)
wildfireDemo/setForLk/1512672244_-01740.jpg (366 KiB)
wildfireDemo/setForLk/1512672304_-01680.jpg (356 KiB)
wildfireDemo/setForLk/1512672364_-01620.jpg (366 KiB)
wildfireDemo/setForLk/1512672424_-01560.jpg (355 KiB)
wildfireDemo/setForLk/1512672484_-01500.jpg (374 KiB)