
Create a Human Activity Classifier in Python: Assignment Solution.


Instructions

Objective
Write a Python program that classifies human activities from smartphone inertial-sensor data (the UCI HAR dataset).

Requirements and Specifications

Using the UCI Human Activity Recognition (HAR) dataset:

- Part 1: train a simple fully connected network on the body-acceleration signals, using a hand-written TensorFlow training loop.
- Part 2: build a 1-D convolutional network on the same data, then iterate on the architecture (Models 2.1-2.3) to improve it.
- Part 3: combine all nine inertial signal channels (body acceleration, total acceleration, gyroscope) and train a deeper CNN on the full data.

Source Code

import tensorflow as tf

import numpy as np

import pandas as pd

import matplotlib.pyplot as plt

import random

from sklearn.model_selection import train_test_split

import sys

import time

import keras.backend as K

### Download data

url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00240/'

datafile = url + 'UCI%20HAR%20Dataset.zip'

# Download (-L follows any redirects the archive server may issue)

!curl -L $datafile --output UCI_HAR_Dataset.zip

#unzip

!unzip -qq UCI_HAR_Dataset.zip

# change dir name to remove spaces

!mv -f UCI\ HAR\ Dataset UCI_HAR_DATASET

### Load Data into Arrays

# load the features and labels (subtract 1 as the labels aren't indexed from 0)

ytest = np.loadtxt('UCI_HAR_DATASET/test/y_test.txt')-1

ytrain = np.loadtxt('UCI_HAR_DATASET/train/y_train.txt')-1

# load the x,y,z body accelerations test data

xx=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/body_acc_x_test.txt')

yy=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/body_acc_y_test.txt')

zz=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/body_acc_z_test.txt')

# concatenate the arrays along the last dimension

xtest = np.concatenate((xx[:,:,None],yy[:,:,None],zz[:,:,None]),axis=2).astype('float')

# (using None here adds an extra dimension of size 1 to the end of the array)
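# A tiny illustration of that trick (the demo array below is hypothetical,
# not part of the solution): indexing with None behaves like np.newaxis,
# turning a (rows, 128) array into (rows, 128, 1)
demo = np.zeros((4, 128))
print(demo.shape, '->', demo[:, :, None].shape)  # (4, 128) -> (4, 128, 1)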

# follow the same approach for the train data

xx=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/body_acc_x_train.txt')

yy=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/body_acc_y_train.txt')

zz=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/body_acc_z_train.txt')

xtrain = np.concatenate((xx[:,:,None],yy[:,:,None],zz[:,:,None]),axis=2).astype('float')

## Now, split the training data into train and test sets, with 80% for training

X_train, X_test, y_train, y_test = train_test_split(
    xtrain, ytrain, test_size=0.2, random_state=42)

# Part 1

### Reshape arrays from 3 dimensions to 2

X_train = tf.reshape(X_train, (-1, 128*3))

X_test = tf.reshape(X_test, (-1, 128*3))

y_train = y_train.astype('int')

y_test = y_test.astype('int')

labels = np.unique(ytrain)
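# Optional sketch: map the 0-based label indices back to activity names.
# This assumes the standard activity_labels.txt that ships at the root of the
# UCI HAR dataset (lines such as "1 WALKING", "2 WALKING_UPSTAIRS", ...).
activity_names = pd.read_csv('UCI_HAR_DATASET/activity_labels.txt',
                             sep=' ', header=None, index_col=0)[1].tolist()
print(dict(zip(labels.astype(int), activity_names)))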

# Single-Layer Neural Network Classifier

### Create Weights

# random initial weights and biases for the final 6-unit softmax layer
weights = tf.random.uniform(shape=(384,6))
offsets = tf.random.uniform(shape=(6,))

### Create Model

model1 = tf.keras.models.Sequential()

model1.add(tf.keras.layers.Dense(384, input_dim = X_train.shape[1]))

model1.add(tf.keras.layers.Dense(384, activation = 'relu'))

model1.add(tf.keras.layers.Dense(len(labels), activation = 'softmax'))

model1.layers[2].set_weights([weights, offsets])

tf.keras.utils.plot_model(model1)

### Function to compute Accuracy

def accuracy(x, y, model):
    y_ = model(x)
    # calculate where the prediction equals the label
    # (cast the labels to int64 so they match the dtype returned by argmax)
    correct = tf.math.equal(tf.math.argmax(y_, axis=-1), tf.cast(y, tf.int64))
    # convert to a float (previously boolean)
    correct = tf.cast(correct, dtype=tf.float32)
    # return the mean to give the overall accuracy
    return tf.math.reduce_mean(correct)
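# Quick sanity check (illustrative only, not required by the brief): the
# freshly initialized 6-class model should sit near chance level, i.e. ~1/6
print('initial accuracy:', float(accuracy(X_train, y_train, model1)))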

### Train Model

EPOCHS = 100

BATCH_SIZE = 64

LEARNING_RATE = 1E-3

# SGD with time-based decay: the effective rate at step t is lr / (1 + decay * t)
opt = tf.keras.optimizers.SGD(learning_rate=LEARNING_RATE, decay=LEARNING_RATE / EPOCHS)

# calculate the number of iterations (batches) per epoch

num_iters = int(X_train.shape[0] / BATCH_SIZE)

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()

accuracies = []

val_accuracies = []

losses = []

for epoch in range(0, EPOCHS):
    sys.stdout.flush()
    epoch_start = time.time()
    for i in range(0, num_iters):
        # determine starting and ending index of the batch
        start = i * BATCH_SIZE
        end = start + BATCH_SIZE
        X_ = X_train[start:end]
        y_ = y_train[start:end]
        # step
        with tf.GradientTape() as tape:
            # predict: returns an array of shape (batch_size, 6) of class probabilities
            y_pred = model1(X_)
            loss = loss_fn(y_, y_pred)
        # calculate the gradients using our tape and then update the model weights
        grads = tape.gradient(loss, model1.trainable_weights)
        opt.apply_gradients(zip(grads, model1.trainable_weights))
    # evaluate once per epoch; recomputing accuracy over the full sets for
    # every batch would be needlessly slow
    acc = accuracy(X_train, y_train, model1)
    val_acc = accuracy(X_test, y_test, model1)
    # show the current epoch number together with timing information
    print("Epoch {}/{} - acc: {:.4f} - val_acc: {:.4f} - loss: {:.4f} ".format(
        epoch + 1, EPOCHS, float(acc), float(val_acc), float(loss)), end="")
    epoch_end = time.time()
    elapsed = (epoch_end - epoch_start)
    print("-> took {:.2f} seconds".format(elapsed))
    accuracies.append(float(acc))
    losses.append(float(loss))
    val_accuracies.append(float(val_acc))
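### Evaluate on the held-out UCI test split

# A sketch that goes beyond the original flow: xtest/ytest were never used
# during training, so they give an extra unbiased estimate for model1
xtest_flat = tf.reshape(xtest, (-1, 128 * 3))
print('held-out accuracy:', float(accuracy(xtest_flat, ytest.astype('int'), model1)))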

### Plot Accuracy, Validation Accuracy and Loss

fig, axes = plt.subplots(nrows = 1, ncols = 3)

axes[0].plot(accuracies)

axes[0].set_xlabel('Epochs')

axes[0].set_title('Accuracy')

axes[0].grid(True)

axes[1].plot(val_accuracies)

axes[1].set_xlabel('Epochs')

axes[1].set_title('Validation Accuracy')

axes[1].grid(True)

axes[2].plot(losses)

axes[2].set_xlabel('Epochs')

axes[2].set_title('Loss')

axes[2].grid(True)

plt.show()

# Part 2

### Recreate tensors

# load the features and labels (subtract 1 as the labels aren't indexed from 0)

ytest = np.loadtxt('UCI_HAR_DATASET/test/y_test.txt')-1

ytrain = np.loadtxt('UCI_HAR_DATASET/train/y_train.txt')-1

# load the x,y,z body accelerations test data

xx=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/body_acc_x_test.txt')

yy=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/body_acc_y_test.txt')

zz=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/body_acc_z_test.txt')

# concatenate the arrays along the last dimension

xtest = np.concatenate((xx[:,:,None],yy[:,:,None],zz[:,:,None]),axis=2).astype('float')

# (using None here adds an extra dimension of size 1 to the end of the array)

# follow the same approach for the train data

xx=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/body_acc_x_train.txt')

yy=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/body_acc_y_train.txt')

zz=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/body_acc_z_train.txt')

xtrain = np.concatenate((xx[:,:,None],yy[:,:,None],zz[:,:,None]),axis=2).astype('float')

## Split Training Data Into Train And Test

X_train, X_test, y_train, y_test = train_test_split(
    xtrain, ytrain, test_size=0.2, random_state=42)

## Build New Model

model2 = tf.keras.models.Sequential()

model2.add(tf.keras.layers.Dense(384, input_shape = (128,3)))

model2.add(tf.keras.layers.Conv1D(32, 4, activation = 'relu'))  # input shape is already set by the first layer

model2.add(tf.keras.layers.BatchNormalization())

model2.add(tf.keras.layers.Activation('relu'))

model2.add(tf.keras.layers.GlobalAveragePooling1D())

model2.add(tf.keras.layers.Dense(len(labels), activation = 'softmax'))

tf.keras.utils.plot_model(model2)

model2.compile(optimizer = 'sgd', loss = 'sparse_categorical_crossentropy', metrics= ['acc'])

history2 = model2.fit(X_train, y_train, epochs = 100, validation_data = (X_test, y_test))

## Plot Accuracy, Validation Accuracy and Loss

fig, axes = plt.subplots(nrows = 1, ncols = 3)

axes[0].plot(history2.history['acc'])

axes[0].set_xlabel('Epochs')

axes[0].set_title('Accuracy')

axes[0].grid(True)

axes[1].plot(history2.history['val_acc'])

axes[1].set_xlabel('Epochs')

axes[1].set_title('Validation Accuracy')

axes[1].grid(True)

axes[2].plot(history2.history['loss'])

axes[2].set_xlabel('Epochs')

axes[2].set_title('Loss')

axes[2].grid(True)

plt.show()

## Optimize the model

### Model 2.1) Replace the BatchNormalization layer with Flatten and use a MaxPooling1D layer; only one convolutional layer is used in this variant

model2_1 = tf.keras.models.Sequential()

model2_1.add(tf.keras.layers.Conv1D(16, 3, activation = 'relu', input_shape = (128,3)))

model2_1.add(tf.keras.layers.MaxPooling1D(3))

model2_1.add(tf.keras.layers.Flatten())

model2_1.add(tf.keras.layers.Dense(len(labels), activation = 'softmax'))

# compile

model2_1.compile(optimizer = tf.keras.optimizers.SGD(learning_rate = 1E-3), loss = 'sparse_categorical_crossentropy', metrics = ['acc'])

model2_1.summary()

tf.keras.utils.plot_model(model2_1)

history2_1 = model2_1.fit(X_train, y_train, epochs = 100, validation_data = (X_test, y_test))

## Plot

fig, axes = plt.subplots(nrows = 1, ncols = 3)

axes[0].plot(history2_1.history['acc'])

axes[0].set_xlabel('Epochs')

axes[0].set_title('Accuracy')

axes[0].grid(True)

axes[1].plot(history2_1.history['val_acc'])

axes[1].set_xlabel('Epochs')

axes[1].set_title('Validation Accuracy')

axes[1].grid(True)

axes[2].plot(history2_1.history['loss'])

axes[2].set_xlabel('Epochs')

axes[2].set_title('Loss')

axes[2].grid(True)

plt.show()

### Model 2.2) Three stacked Conv1D/MaxPooling1D blocks, trained with Adam instead of SGD

model2_2 = tf.keras.models.Sequential()

model2_2.add(tf.keras.layers.Conv1D(16, 3, activation = 'relu', input_shape = (128,3)))

model2_2.add(tf.keras.layers.MaxPooling1D(3))

model2_2.add(tf.keras.layers.Conv1D(64, 3, activation='relu'))

model2_2.add(tf.keras.layers.MaxPooling1D(3))

model2_2.add(tf.keras.layers.Conv1D(128, 3, activation='relu'))

model2_2.add(tf.keras.layers.MaxPooling1D(3))

model2_2.add(tf.keras.layers.Flatten())

model2_2.add(tf.keras.layers.Dense(len(labels), activation = 'softmax'))

# compile

model2_2.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = 1E-3), loss = 'sparse_categorical_crossentropy', metrics = ['acc'])

model2_2.summary()

tf.keras.utils.plot_model(model2_2)

history2_2 = model2_2.fit(X_train, y_train, epochs = 100, validation_data = (X_test, y_test))

## Plot

fig, axes = plt.subplots(nrows = 1, ncols = 3)

axes[0].plot(history2_2.history['acc'])

axes[0].set_xlabel('Epochs')

axes[0].set_title('Accuracy')

axes[0].grid(True)

axes[1].plot(history2_2.history['val_acc'])

axes[1].set_xlabel('Epochs')

axes[1].set_title('Validation Accuracy')

axes[1].grid(True)

axes[2].plot(history2_2.history['loss'])

axes[2].set_xlabel('Epochs')

axes[2].set_title('Loss')

axes[2].grid(True)

plt.show()

### Model 2.3) Same as Model 2.2, but with Dropout(0.05) after each pooling layer

model2_3 = tf.keras.models.Sequential()

model2_3.add(tf.keras.layers.Conv1D(16, 3, activation = 'relu', input_shape = (128,3)))

model2_3.add(tf.keras.layers.MaxPooling1D(3))

model2_3.add(tf.keras.layers.Dropout(0.05))

model2_3.add(tf.keras.layers.Conv1D(64, 3, activation='relu'))

model2_3.add(tf.keras.layers.MaxPooling1D(3))

model2_3.add(tf.keras.layers.Dropout(0.05))

model2_3.add(tf.keras.layers.Conv1D(128, 3, activation='relu'))

model2_3.add(tf.keras.layers.MaxPooling1D(3))

model2_3.add(tf.keras.layers.Dropout(0.05))

model2_3.add(tf.keras.layers.Flatten())

model2_3.add(tf.keras.layers.Dense(len(labels), activation = 'softmax'))

# compile

model2_3.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = 1E-3), loss = 'sparse_categorical_crossentropy', metrics = ['acc'])

tf.keras.utils.plot_model(model2_3)

model2_3.summary()

history2_3 = model2_3.fit(X_train, y_train, epochs = 100, validation_data = (X_test, y_test))

fig, axes = plt.subplots(nrows = 1, ncols = 3)

axes[0].plot(history2_3.history['acc'])

axes[0].set_xlabel('Epochs')

axes[0].set_title('Accuracy')

axes[0].grid(True)

axes[1].plot(history2_3.history['val_acc'])

axes[1].set_xlabel('Epochs')

axes[1].set_title('Validation Accuracy')

axes[1].grid(True)

axes[2].plot(history2_3.history['loss'])

axes[2].set_xlabel('Epochs')

axes[2].set_title('Loss')

axes[2].grid(True)

plt.show()

## Accuracy and Loss for all Model 2 Variants (2, 2.1, 2.2, 2.3)

fig, axes = plt.subplots(nrows = 1, ncols = 3, figsize=(12,12))

axes[0].plot(history2.history['acc'], label = 'Model 2')

axes[0].plot(history2_1.history['acc'], label = 'Model 2.1')

axes[0].plot(history2_2.history['acc'], label = 'Model 2.2')

axes[0].plot(history2_3.history['acc'], label = 'Model 2.3')

axes[0].set_xlabel('Epochs')

axes[0].set_title('Accuracy')

axes[0].grid(True)

axes[0].legend()

axes[1].plot(history2.history['val_acc'], label = 'Model 2')

axes[1].plot(history2_1.history['val_acc'], label = 'Model 2.1')

axes[1].plot(history2_2.history['val_acc'], label = 'Model 2.2')

axes[1].plot(history2_3.history['val_acc'], label = 'Model 2.3')

axes[1].set_xlabel('Epochs')

axes[1].set_title('Validation Accuracy')

axes[1].grid(True)

axes[1].legend()

axes[2].plot(history2.history['loss'], label = 'Model 2')

axes[2].plot(history2_1.history['loss'], label = 'Model 2.1')

axes[2].plot(history2_2.history['loss'], label = 'Model 2.2')

axes[2].plot(history2_3.history['loss'], label = 'Model 2.3')

axes[2].set_xlabel('Epochs')

axes[2].set_title('Loss')

axes[2].grid(True)

axes[2].legend()

plt.show()

# Part 3

### Use all Available Data

# load the x, y, z channels for each of the three sensor streams
# (body acceleration, total acceleration, gyroscope)

xx=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/body_acc_x_test.txt')

yy=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/body_acc_y_test.txt')

zz=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/body_acc_z_test.txt')

xxt=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/total_acc_x_test.txt')

yyt=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/total_acc_y_test.txt')

zzt=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/total_acc_z_test.txt')

xxg=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/body_gyro_x_test.txt')

yyg=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/body_gyro_y_test.txt')

zzg=np.loadtxt('/content/UCI_HAR_DATASET/test/Inertial Signals/body_gyro_z_test.txt')

# concatenate into a single (n,128,9) array

xtest = np.concatenate((xx[:,:,None], yy[:,:,None], zz[:,:,None],
                        xxt[:,:,None], yyt[:,:,None], zzt[:,:,None],
                        xxg[:,:,None], yyg[:,:,None], zzg[:,:,None]), axis=2)

# Train data

xx=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/body_acc_x_train.txt')

yy=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/body_acc_y_train.txt')

zz=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/body_acc_z_train.txt')

xxt=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/total_acc_x_train.txt')

yyt=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/total_acc_y_train.txt')

zzt=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/total_acc_z_train.txt')

xxg=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/body_gyro_x_train.txt')

yyg=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/body_gyro_y_train.txt')

zzg=np.loadtxt('/content/UCI_HAR_DATASET/train/Inertial Signals/body_gyro_z_train.txt')

# concatenate into a single (n,128,9) array

xtrain = np.concatenate((xx[:,:,None], yy[:,:,None], zz[:,:,None],
                         xxt[:,:,None], yyt[:,:,None], zzt[:,:,None],
                         xxg[:,:,None], yyg[:,:,None], zzg[:,:,None]), axis=2)

## Concatenate the data

X = np.concatenate((xtrain, xtest))

y = np.concatenate((ytrain, ytest))
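# Sanity check (illustrative): the combined set should hold all 10299 windows
# (7352 train + 2947 test), each 128 timesteps x 9 channels
print(X.shape, y.shape)  # expected: (10299, 128, 9) (10299,)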

## Split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)

model3 = tf.keras.models.Sequential()

model3.add(tf.keras.layers.Conv1D(16, 3, activation = 'relu', input_shape = (128,9)))

model3.add(tf.keras.layers.MaxPooling1D(3))

model3.add(tf.keras.layers.Dropout(0.05))

model3.add(tf.keras.layers.Conv1D(64, 3, activation='relu'))

model3.add(tf.keras.layers.Conv1D(64, 3, activation='relu'))

model3.add(tf.keras.layers.MaxPooling1D(3))

model3.add(tf.keras.layers.Dropout(0.05))

model3.add(tf.keras.layers.Conv1D(128, 3, activation='relu'))

model3.add(tf.keras.layers.Conv1D(128, 3, activation='relu'))

model3.add(tf.keras.layers.MaxPooling1D(3))

model3.add(tf.keras.layers.Dropout(0.05))

model3.add(tf.keras.layers.BatchNormalization())

model3.add(tf.keras.layers.Activation('relu'))

model3.add(tf.keras.layers.GlobalAveragePooling1D())

#model3.add(tf.keras.layers.Flatten())

model3.add(tf.keras.layers.Dense(len(labels), activation = 'softmax'))

# compile

model3.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = 1E-5), loss = 'sparse_categorical_crossentropy', metrics = ['acc'])

model3.summary()

tf.keras.utils.plot_model(model3)

history3 = model3.fit(X_train, y_train, epochs = 100, validation_data = (X_test, y_test))

### Plot Accuracy and Loss

fig, axes = plt.subplots(nrows = 1, ncols = 3)

axes[0].plot(history3.history['acc'])

axes[0].set_xlabel('Epochs')

axes[0].set_title('Accuracy')

axes[0].grid(True)

axes[1].plot(history3.history['val_acc'])

axes[1].set_xlabel('Epochs')

axes[1].set_title('Validation Accuracy')

axes[1].grid(True)

axes[2].plot(history3.history['loss'])

axes[2].set_xlabel('Epochs')

axes[2].set_title('Loss')

axes[2].grid(True)

plt.show()

### Calculate Accuracy

acc = accuracy(X_test, y_test, model3)

print("The accuracy of the model is: {:.2f}%".format(acc*100.0))