
TensorFlow to otwartoźródłowa biblioteka programistyczna napisana przez Google Brain Team. Wykorzystywana jest głównie w uczeniu maszynowym i głębokich sieciach neuronowych.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Input image dimensions: 256x256 RGB (must match the generators' target_size).
input_shape = (256, 256, 3)

# Training pipeline: pixels rescaled to [0, 1] plus random geometric
# augmentation (rotation, shifts, shear, zoom, horizontal flips) to
# reduce overfitting; new border pixels are filled by nearest-neighbor.
train_gen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest',
)
train_iterator = train_gen.flow_from_directory(
    './cats_and_dogs/train/',
    target_size=(256, 256),
    batch_size=20,
    class_mode='categorical',
)

# Test pipeline: rescaling only, no augmentation; shuffle=False keeps
# predictions aligned with the iterator's filename order.
test_generator = ImageDataGenerator(rescale=1. / 255)
test_iterator = test_generator.flow_from_directory(
    './cats_and_dogs/test/',
    target_size=(256, 256),
    shuffle=False,
    class_mode='categorical',
    batch_size=2,
)

# Validation pipeline: rescaling only, no augmentation.
validation_gen = ImageDataGenerator(rescale=1. / 255.0)
validation_iterator = validation_gen.flow_from_directory(
    './cats_and_dogs/validation/',
    target_size=(256, 256),
    batch_size=10,
    class_mode='categorical',
)
# Create the model: a small VGG-style CNN — four Conv/MaxPool stages that
# halve spatial resolution while widening channels, then a dense classifier.
model = keras.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    # BUG FIX: was Dense(4). The generators load a 2-class dataset
    # (cats vs. dogs) with class_mode='categorical', so the one-hot labels
    # have 2 entries; a 4-unit softmax mismatches the targets under
    # categorical_crossentropy.
    layers.Dense(2, activation='softmax'),
])
# Compile the model.  categorical_crossentropy matches the one-hot labels
# emitted by class_mode='categorical'; Adam is used with default settings.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model.  steps_per_epoch * batch_size = 500 * 20 = 10000 images
# per epoch; validation_steps * batch_size = 250 * 10 = 2500 images per
# validation pass.
history = model.fit(train_iterator,
                    validation_data=validation_iterator,
                    steps_per_epoch=500,
                    epochs=100,
                    validation_steps=250)
# Evaluate on the held-out test set, report accuracy, and persist the
# trained model to disk for the separate inference script.
test_loss, test_acc = model.evaluate(test_iterator)
print('Test accuracy:', test_acc)
model.save('cats_dogs_model.h5')
(tf) ubuntu@ComputerName:~/cat_or_dog$ python3.9 train.py 2023-03-18 09:27:22.570984: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags. 2023-03-18 09:27:23.120309: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: :/home/bithusky/miniconda3/envs/tf/lib/ 2023-03-18 09:27:23.120380: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: :/home/bithusky/miniconda3/envs/tf/lib/ 2023-03-18 09:27:23.120388: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly. Found 17705 images belonging to 2 classes. Found 6137 images belonging to 2 classes. Found 6056 images belonging to 2 classes. 2023-03-18 09:27:24.560914: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:31:00.0/numa_node Your kernel may have been built without NUMA support. 2023-03-18 09:27:24.566222: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:31:00.0/numa_node Your kernel may have been built without NUMA support. 
2023-03-18 09:27:24.566637: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:31:00.0/numa_node Your kernel may have been built without NUMA support. 2023-03-18 09:27:24.566941: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags. 2023-03-18 09:27:24.567674: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:31:00.0/numa_node Your kernel may have been built without NUMA support. 2023-03-18 09:27:24.568042: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:31:00.0/numa_node Your kernel may have been built without NUMA support. 2023-03-18 09:27:24.568393: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:31:00.0/numa_node Your kernel may have been built without NUMA support. 2023-03-18 09:27:25.174786: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:31:00.0/numa_node Your kernel may have been built without NUMA support. 2023-03-18 09:27:25.175168: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:31:00.0/numa_node Your kernel may have been built without NUMA support. 2023-03-18 09:27:25.175197: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1700] Could not identify NUMA node of platform GPU id 0, defaulting to 0. Your kernel may not have been built with NUMA support. 
2023-03-18 09:27:25.175568: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:31:00.0/numa_node Your kernel may have been built without NUMA support. 2023-03-18 09:27:25.175624: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1613] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 3892 MB memory: -> device: 0, name: NVIDIA GeForce RTX 2060, pci bus id: 0000:31:00.0, compute capability: 7.5 Epoch 1/60 2023-03-18 09:27:27.112326: I tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:428] Loaded cuDNN version 8100 2023-03-18 09:27:28.629297: I tensorflow/compiler/xla/service/service.cc:173] XLA service 0x1b5ba500 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices: 2023-03-18 09:27:28.629334: I tensorflow/compiler/xla/service/service.cc:181] StreamExecutor device (0): NVIDIA GeForce RTX 2060, Compute Capability 7.5 2023-03-18 09:27:28.632909: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:268] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable. 2023-03-18 09:27:28.728078: I tensorflow/compiler/jit/xla_compilation_cache.cc:477] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. 
400/400 [==============================] - 144s 351ms/step - loss: 0.6867 - accuracy: 0.5512 - val_loss: 0.6467 - val_accuracy: 0.5932 Epoch 2/60 400/400 [==============================] - 140s 349ms/step - loss: 0.6551 - accuracy: 0.6018 - val_loss: 0.6254 - val_accuracy: 0.6562 Epoch 3/60 400/400 [==============================] - 137s 343ms/step - loss: 0.6021 - accuracy: 0.6707 - val_loss: 0.5747 - val_accuracy: 0.6890 Epoch 4/60 400/400 [==============================] - 136s 339ms/step - loss: 0.5754 - accuracy: 0.6933 - val_loss: 0.5149 - val_accuracy: 0.7412 Epoch 5/60 400/400 [==============================] - 136s 341ms/step - loss: 0.5450 - accuracy: 0.7287 - val_loss: 0.4819 - val_accuracy: 0.7665 Epoch 6/60 400/400 [==============================] - 138s 344ms/step - loss: 0.5248 - accuracy: 0.7373 - val_loss: 0.4576 - val_accuracy: 0.7897 Epoch 7/60 400/400 [==============================] - 135s 338ms/step - loss: 0.5074 - accuracy: 0.7510 - val_loss: 0.4489 - val_accuracy: 0.7897 Epoch 8/60 400/400 [==============================] - 139s 347ms/step - loss: 0.4900 - accuracy: 0.7610 - val_loss: 0.4437 - val_accuracy: 0.7935 Epoch 9/60 400/400 [==============================] - 139s 347ms/step - loss: 0.4830 - accuracy: 0.7682 - val_loss: 0.5206 - val_accuracy: 0.7423 Epoch 10/60 400/400 [==============================] - 139s 347ms/step - loss: 0.4588 - accuracy: 0.7867 - val_loss: 0.4113 - val_accuracy: 0.8060 Epoch 11/60 400/400 [==============================] - 138s 346ms/step - loss: 0.4312 - accuracy: 0.7999 - val_loss: 0.3770 - val_accuracy: 0.8363 Epoch 12/60 400/400 [==============================] - 138s 345ms/step - loss: 0.4112 - accuracy: 0.8141 - val_loss: 0.3957 - val_accuracy: 0.8207 Epoch 13/60 400/400 [==============================] - 138s 346ms/step - loss: 0.3946 - accuracy: 0.8262 - val_loss: 0.3361 - val_accuracy: 0.8570 Epoch 14/60 400/400 [==============================] - 138s 346ms/step - loss: 0.3746 - accuracy: 0.8324 - 
val_loss: 0.3024 - val_accuracy: 0.8735 Epoch 15/60 400/400 [==============================] - 140s 349ms/step - loss: 0.3709 - accuracy: 0.8322 - val_loss: 0.2950 - val_accuracy: 0.8770 Epoch 16/60 400/400 [==============================] - 139s 348ms/step - loss: 0.3549 - accuracy: 0.8471 - val_loss: 0.2562 - val_accuracy: 0.8875 Epoch 17/60 400/400 [==============================] - 141s 352ms/step - loss: 0.3307 - accuracy: 0.8553 - val_loss: 0.3109 - val_accuracy: 0.8680 Epoch 18/60 400/400 [==============================] - 139s 347ms/step - loss: 0.3267 - accuracy: 0.8587 - val_loss: 0.2616 - val_accuracy: 0.8880 Epoch 19/60 400/400 [==============================] - 139s 347ms/step - loss: 0.3061 - accuracy: 0.8665 - val_loss: 0.2375 - val_accuracy: 0.8988 Epoch 20/60 400/400 [==============================] - 139s 347ms/step - loss: 0.2961 - accuracy: 0.8714 - val_loss: 0.2343 - val_accuracy: 0.9010 Epoch 21/60 400/400 [==============================] - 139s 348ms/step - loss: 0.2822 - accuracy: 0.8803 - val_loss: 0.2368 - val_accuracy: 0.9038 Epoch 22/60 400/400 [==============================] - 137s 341ms/step - loss: 0.2854 - accuracy: 0.8765 - val_loss: 0.2420 - val_accuracy: 0.8967 Epoch 23/60 400/400 [==============================] - 136s 340ms/step - loss: 0.2706 - accuracy: 0.8845 - val_loss: 0.2231 - val_accuracy: 0.9107 Epoch 24/60 400/400 [==============================] - 136s 341ms/step - loss: 0.2662 - accuracy: 0.8867 - val_loss: 0.2144 - val_accuracy: 0.9133 Epoch 25/60 400/400 [==============================] - 143s 358ms/step - loss: 0.2567 - accuracy: 0.8898 - val_loss: 0.1975 - val_accuracy: 0.9205 Epoch 26/60 400/400 [==============================] - 140s 349ms/step - loss: 0.2512 - accuracy: 0.8947 - val_loss: 0.2042 - val_accuracy: 0.9115 Epoch 27/60 400/400 [==============================] - 138s 346ms/step - loss: 0.2461 - accuracy: 0.8960 - val_loss: 0.2235 - val_accuracy: 0.9010 Epoch 28/60 400/400 
[==============================] - 139s 348ms/step - loss: 0.2396 - accuracy: 0.8980 - val_loss: 0.2494 - val_accuracy: 0.8980 Epoch 29/60 400/400 [==============================] - 144s 359ms/step - loss: 0.2341 - accuracy: 0.8991 - val_loss: 0.1974 - val_accuracy: 0.9252 Epoch 30/60 400/400 [==============================] - 141s 352ms/step - loss: 0.2284 - accuracy: 0.9050 - val_loss: 0.1948 - val_accuracy: 0.9225 Epoch 31/60 400/400 [==============================] - 140s 349ms/step - loss: 0.2252 - accuracy: 0.9089 - val_loss: 0.1779 - val_accuracy: 0.9252 Epoch 32/60 400/400 [==============================] - 142s 355ms/step - loss: 0.2226 - accuracy: 0.9074 - val_loss: 0.1799 - val_accuracy: 0.9300 Epoch 33/60 400/400 [==============================] - 140s 351ms/step - loss: 0.2217 - accuracy: 0.9065 - val_loss: 0.1824 - val_accuracy: 0.9240 Epoch 34/60 400/400 [==============================] - 145s 362ms/step - loss: 0.2121 - accuracy: 0.9100 - val_loss: 0.1734 - val_accuracy: 0.9308 Epoch 35/60 400/400 [==============================] - 142s 356ms/step - loss: 0.2098 - accuracy: 0.9144 - val_loss: 0.1637 - val_accuracy: 0.9400 Epoch 36/60 400/400 [==============================] - 140s 351ms/step - loss: 0.2051 - accuracy: 0.9157 - val_loss: 0.1872 - val_accuracy: 0.9245 Epoch 37/60 400/400 [==============================] - 143s 357ms/step - loss: 0.2113 - accuracy: 0.9134 - val_loss: 0.1921 - val_accuracy: 0.9233 Epoch 38/60 400/400 [==============================] - 144s 360ms/step - loss: 0.1985 - accuracy: 0.9179 - val_loss: 0.1811 - val_accuracy: 0.9247 Epoch 39/60 400/400 [==============================] - 145s 361ms/step - loss: 0.2005 - accuracy: 0.9177 - val_loss: 0.1649 - val_accuracy: 0.9380 Epoch 40/60 400/400 [==============================] - 144s 360ms/step - loss: 0.1913 - accuracy: 0.9192 - val_loss: 0.1607 - val_accuracy: 0.9375 Epoch 41/60 400/400 [==============================] - 142s 356ms/step - loss: 0.1937 - accuracy: 0.9164 - 
val_loss: 0.1631 - val_accuracy: 0.9383 Epoch 42/60 400/400 [==============================] - 140s 351ms/step - loss: 0.1890 - accuracy: 0.9206 - val_loss: 0.1644 - val_accuracy: 0.9305 Epoch 43/60 400/400 [==============================] - 140s 349ms/step - loss: 0.1998 - accuracy: 0.9178 - val_loss: 0.1688 - val_accuracy: 0.9352 Epoch 44/60 400/400 [==============================] - 138s 344ms/step - loss: 0.1868 - accuracy: 0.9221 - val_loss: 0.1712 - val_accuracy: 0.9308 Epoch 45/60 400/400 [==============================] - 137s 343ms/step - loss: 0.1886 - accuracy: 0.9221 - val_loss: 0.1921 - val_accuracy: 0.9293 Epoch 46/60 400/400 [==============================] - 138s 345ms/step - loss: 0.1869 - accuracy: 0.9228 - val_loss: 0.1442 - val_accuracy: 0.9400 Epoch 47/60 400/400 [==============================] - 137s 342ms/step - loss: 0.1841 - accuracy: 0.9231 - val_loss: 0.1394 - val_accuracy: 0.9455 Epoch 48/60 400/400 [==============================] - 140s 351ms/step - loss: 0.1825 - accuracy: 0.9247 - val_loss: 0.1577 - val_accuracy: 0.9415 Epoch 49/60 400/400 [==============================] - 141s 353ms/step - loss: 0.1823 - accuracy: 0.9237 - val_loss: 0.1380 - val_accuracy: 0.9435 Epoch 50/60 400/400 [==============================] - 146s 364ms/step - loss: 0.1818 - accuracy: 0.9237 - val_loss: 0.1305 - val_accuracy: 0.9498 Epoch 51/60 400/400 [==============================] - 144s 359ms/step - loss: 0.1803 - accuracy: 0.9242 - val_loss: 0.1485 - val_accuracy: 0.9375 Epoch 52/60 400/400 [==============================] - 137s 342ms/step - loss: 0.1771 - accuracy: 0.9262 - val_loss: 0.1560 - val_accuracy: 0.9355 Epoch 53/60 400/400 [==============================] - 138s 344ms/step - loss: 0.1783 - accuracy: 0.9257 - val_loss: 0.1315 - val_accuracy: 0.9475 Epoch 54/60 400/400 [==============================] - 136s 341ms/step - loss: 0.1771 - accuracy: 0.9267 - val_loss: 0.1831 - val_accuracy: 0.9187 Epoch 55/60 400/400 
[==============================] - 137s 343ms/step - loss: 0.1707 - accuracy: 0.9283 - val_loss: 0.1407 - val_accuracy: 0.9423 Epoch 56/60 400/400 [==============================] - 137s 342ms/step - loss: 0.1707 - accuracy: 0.9292 - val_loss: 0.1528 - val_accuracy: 0.9390 Epoch 57/60 400/400 [==============================] - 137s 342ms/step - loss: 0.1627 - accuracy: 0.9324 - val_loss: 0.1533 - val_accuracy: 0.9380 Epoch 58/60 400/400 [==============================] - 136s 341ms/step - loss: 0.1695 - accuracy: 0.9279 - val_loss: 0.1873 - val_accuracy: 0.9212 Epoch 59/60 400/400 [==============================] - 139s 347ms/step - loss: 0.1676 - accuracy: 0.9316 - val_loss: 0.1336 - val_accuracy: 0.9452 Epoch 60/60 400/400 [==============================] - 137s 341ms/step - loss: 0.1692 - accuracy: 0.9297 - val_loss: 0.1250 - val_accuracy: 0.9495 3069/3069 [==============================] - 11s 4ms/step - loss: 0.1332 - accuracy: 0.9451 Test accuracy: 0.945087194442749 (tf) ubuntu@ComputerName:~/cat_or_dog$
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import numpy as np
import os
# Define the input shape of the images (must match the training pipeline).
input_shape = (256, 256, 3)

# Load the trained cats-vs-dogs model.
model = keras.models.load_model('cats_dogs_model.h5')

path = "imgs/"
dir_list = os.listdir(path)

# flow_from_directory assigns class indices alphabetically by folder name,
# so index 0 = cats and index 1 = dogs.  BUG FIX: the list previously held a
# third entry "Fox", which a 2-output softmax can never predict.
my_class_labels = ["Cat", "Dog"]

for img_path in dir_list:
    # BUG FIX: accept .jpeg and uppercase extensions, not only lowercase .jpg.
    if img_path.lower().endswith((".jpg", ".jpeg")):
        # BUG FIX: convert to RGB so grayscale/RGBA files still produce a
        # (256, 256, 3) array; also join paths portably instead of "+".
        img = Image.open(os.path.join(path, img_path)).convert('RGB')
        img = img.resize(input_shape[:2], Image.LANCZOS)
        # Apply the same [0, 1] rescaling used during training, then add
        # a leading batch axis -> (1, 256, 256, 3).
        img = np.array(img) / 255.0
        img = np.expand_dims(img, axis=0)
        # Make a prediction
        preds = model.predict(img)
        # Interpret the results: highest-probability class and its confidence.
        class_idx = np.argmax(preds)
        confidence = preds[0][class_idx]
        class_label = my_class_labels[class_idx]
        print(f'{img_path} => {class_label}')
        print('Confidence:', str(round(confidence*100, 1))+"%")