+1 (315) 557-6473 

Create A Program to Implement Max Pooling in Python Assignment Solution.


Instructions

Objective
Write a Python program that implements max pooling.

Requirements and Specifications

program-to-implement-max-pooling-in-python

Source Code

import numpy as np

import tensorflow as tf

import pandas as pd

import tarfile

from PIL import Image

from sklearn.model_selection import train_test_split

import matplotlib.pyplot as plt

from tensorflow.keras.preprocessing.image import ImageDataGenerator

import os

import cv2

### Download Dataset

Dataset available at: https://www.robots.ox.ac.uk/~vgg/data/flowers/17/17flowers.tgz

!wget https://www.robots.ox.ac.uk/~vgg/data/flowers/17/17flowers.tgz

!tar -xvf /content/17flowers.tgz

### Load images into NumPy Arrays and label them

The dataset contains 80 images per class across 17 classes, but since we only need 5 classes, we load only the first 5 × 80 = 400 images.

# Load the first 5 flower classes (80 images each) into NumPy arrays.
# The dataset stores files as /content/jpg/image_0001.jpg ... in
# class-contiguous order, so the label advances every 80 images.
NUM_CLASSES = 5        # only the first 5 of the 17 classes are used
IMAGES_PER_CLASS = 80  # the dataset ships 80 images per class

X = []  # images as float32 arrays of shape (224, 224, 3)
y = []  # integer class labels 0..NUM_CLASSES-1
j = 0   # label of the class currently being read

for i in range(1, IMAGES_PER_CLASS * NUM_CLASSES + 1):
    file_dir = f"/content/jpg/image_{str(i).zfill(4)}.jpg"
    # Resize every image to the fixed 224x224 input size expected by the model.
    img = Image.open(file_dir).resize((224, 224))
    X.append(np.asarray(img, dtype='float32'))
    y.append(j)
    if i % IMAGES_PER_CLASS == 0:
        # Finished one class worth of images; move to the next label.
        j += 1

X = np.asarray(X)
# Column vector of labels, one row per image.
y = np.asarray(y).reshape(NUM_CLASSES * IMAGES_PER_CLASS, 1)

### Split into train and test

# Hold out 30% of the images for evaluation; the fixed random_state makes
# the split reproducible across runs.
test_size = 0.3
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=test_size, random_state=42
)

### Plot first 16 training images

# Preview a 4x4 grid of training images; pixel values are scaled from
# [0, 255] down to [0, 1] purely for display.
plt.figure(figsize=(10, 10))
for idx in range(16):
    ax = plt.subplot(4, 4, idx + 1)
    ax.set_xticks([])   # hide axis ticks — they carry no meaning here
    ax.set_yticks([])
    ax.grid(False)
    ax.imshow(X_train[idx] / 255, cmap=plt.cm.binary)
    ax.set_xlabel(y_train[idx])  # show the integer class label under each image

# Model 2

# Model 2: a small CNN with on-the-fly data augmentation and dropout
# regularization, trained on the 5-class flower subset.
model2 = tf.keras.models.Sequential()

# Declare the input shape explicitly. NOTE(review): the original passed
# input_shape= to the third-added Conv2D layer, where Keras Sequential
# ignores it — only the first layer's shape declaration is honored.
model2.add(tf.keras.layers.Input(shape=(224, 224, 3)))

# Preprocessing / augmentation layers. Rescaling always maps pixels from
# [0, 255] to [0, 1]; the random flip/rotation layers are active only
# during training.
model2.add(tf.keras.layers.Rescaling(1./255))
model2.add(tf.keras.layers.RandomFlip("horizontal_and_vertical"))
model2.add(tf.keras.layers.RandomRotation(0.5))

# Convolutional feature extractor: three Conv2D + MaxPooling2D stages
# with widening filter counts (16 -> 32 -> 64).
model2.add(tf.keras.layers.Conv2D(16, (3, 3), activation='relu'))
model2.add(tf.keras.layers.MaxPooling2D(2, 2))
model2.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu'))
model2.add(tf.keras.layers.MaxPooling2D(2, 2))
model2.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model2.add(tf.keras.layers.MaxPooling2D(2, 2))

# Classifier head.
model2.add(tf.keras.layers.Flatten())
model2.add(tf.keras.layers.Dense(128, activation='relu'))
model2.add(tf.keras.layers.Dropout(0.25))  # fight overfitting on 280 train images
# BUG FIX: only 5 classes are loaded (labels 0-4), so the output layer
# needs 5 softmax units — the original's Dense(17) allocated 12 dead
# outputs that can never correspond to a training label.
model2.add(tf.keras.layers.Dense(5, activation='softmax'))

# sparse_categorical_crossentropy matches the integer (non-one-hot) labels.
model2.compile(optimizer='rmsprop',
               loss='sparse_categorical_crossentropy',
               metrics=['accuracy'])

history2 = model2.fit(X_train, y_train, epochs=100)

# Plot the training accuracy of model 2 over the epochs.
acc_curve = history2.history['accuracy']
plt.figure()
plt.plot(acc_curve, label='accuracy')
plt.grid(True)
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()

This model performs noticeably better than the first one, thanks to the data-augmentation layers combined with the dropout layer.