Commit 25fcf5d5 authored by ben.eastwood

Initial commit

{
"cells": [
{
"cell_type": "code",
"execution_count": 10,
"id": "21a304bb",
"metadata": {},
"outputs": [
],
"source": [
"# baseline cnn model for mnist\n",
"from numpy import mean\n",
"from numpy import std\n",
"from matplotlib import pyplot as plt\n",
"from sklearn.model_selection import KFold\n",
"from tensorflow.keras.datasets import mnist\n",
"from tensorflow.keras.utils import to_categorical\n",
"from tensorflow.keras.models import Sequential\n",
"from tensorflow.keras.layers import Conv2D\n",
"from tensorflow.keras.layers import MaxPooling2D\n",
"from tensorflow.keras.layers import Dense\n",
"from tensorflow.keras.layers import Flatten\n",
"from tensorflow.keras.optimizers import SGD\n",
" \n",
"# load train and test dataset\n",
"def load_dataset():\n",
" # load dataset\n",
" (trainX, trainY), (testX, testY) = mnist.load_data()\n",
" # reshape dataset to have a single channel\n",
" trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))\n",
" testX = testX.reshape((testX.shape[0], 28, 28, 1))\n",
" # one hot encode target values\n",
" trainY = to_categorical(trainY)\n",
" testY = to_categorical(testY)\n",
" return trainX, trainY, testX, testY\n",
" \n",
"# scale pixels\n",
"def prep_pixels(train, test):\n",
" # convert from integers to floats\n",
" train_norm = train.astype('float32')\n",
" test_norm = test.astype('float32')\n",
" # normalize to range 0-1\n",
" train_norm = train_norm / 255.0\n",
" test_norm = test_norm / 255.0\n",
" # return normalized images\n",
" return train_norm, test_norm\n",
" \n",
"# define cnn model\n",
"def define_model():\n",
" model = Sequential()\n",
" model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))\n",
" model.add(MaxPooling2D((2, 2)))\n",
" model.add(Flatten())\n",
" model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))\n",
" model.add(Dense(10, activation='softmax'))\n",
" # compile model\n",
" opt = SGD(learning_rate=0.01, momentum=0.9)\n",
" model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\n",
" return model\n",
" \n",
"# evaluate a model using k-fold cross-validation\n",
"def evaluate_model(dataX, dataY, n_folds=5):\n",
" scores, histories = list(), list()\n",
" # prepare cross validation\n",
" kfold = KFold(n_folds, shuffle=True, random_state=1)\n",
" # enumerate splits\n",
" for train_ix, test_ix in kfold.split(dataX):\n",
"  # define model\n",
"  model = define_model()\n",
"  # select rows for train and test\n",
"  trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]\n",
"  # fit model\n",
"  history = model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0)\n",
"  # evaluate model\n",
"  _, acc = model.evaluate(testX, testY, verbose=0)\n",
"  print('> %.3f' % (acc * 100.0))\n",
"  # store scores\n",
"  scores.append(acc)\n",
"  histories.append(history)\n",
" return scores, histories\n",
" \n",
"# plot diagnostic learning curves\n",
"def summarize_diagnostics(histories):\n",
" for i in range(len(histories)):\n",
"  # plot loss\n",
"  plt.subplot(2, 1, 1)\n",
"  plt.title('Cross Entropy Loss')\n",
"  plt.plot(histories[i].history['loss'], color='blue', label='train')\n",
"  plt.plot(histories[i].history['val_loss'], color='orange', label='test')\n",
"  # plot accuracy\n",
"  plt.subplot(2, 1, 2)\n",
"  plt.title('Classification Accuracy')\n",
"  plt.plot(histories[i].history['accuracy'], color='blue', label='train')\n",
"  plt.plot(histories[i].history['val_accuracy'], color='orange', label='test')\n",
" plt.show()\n",
" \n",
"# summarize model performance\n",
"def summarize_performance(scores):\n",
" # print summary\n",
" print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))\n",
" # box and whisker plots of results\n",
" plt.boxplot(scores)\n",
" plt.show()\n",
" \n",
"# run the test harness for evaluating a model\n",
"def run_test_harness():\n",
" # load dataset\n",
" trainX, trainY, testX, testY = load_dataset()\n",
" # prepare pixel data\n",
" trainX, testX = prep_pixels(trainX, testX)\n",
" # evaluate model\n",
" scores, histories = evaluate_model(trainX, trainY)\n",
" # learning curves\n",
" summarize_diagnostics(histories)\n",
" # summarize estimated performance\n",
" summarize_performance(scores)\n",
" \n",
"# entry point, run the test harness\n",
"run_test_harness()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "38771ec7",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
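The notebook above only cross-validates the CNN, while the Tkinter script below loads a saved model.h5 from disk. A minimal sketch of a follow-up cell that could produce such a file, assuming the load_dataset, prep_pixels and define_model helpers from the cell above are already defined in the kernel; the epochs and batch size mirror the test harness, and the plain 'model.h5' filename is an assumption (the GUI loads it from an absolute path).

# fit a final model on the full MNIST training set and save it for the GUI below
trainX, trainY, testX, testY = load_dataset()
trainX, testX = prep_pixels(trainX, testX)
model = define_model()
model.fit(trainX, trainY, epochs=10, batch_size=32, verbose=0)
_, acc = model.evaluate(testX, testY, verbose=0)
print('Hold-out accuracy: %.3f' % (acc * 100.0))
# assumption: saved next to the notebook; the GUI script loads D:\University\AI\Project\model.h5
model.save('model.h5')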
import os
import PIL
import cv2
import glob
import numpy as np
from tkinter import *
from PIL import Image, ImageDraw, ImageGrab

# Tkinter GUI based on https://medium.com/analytics-vidhya/handwritten-digit-recognition-gui-app-46e3d7b37287

def Clear_Widget():
    global cv
    cv.delete("all")

def Activate_Event(event):
    global lastx, lasty
    cv.bind('<B1-Motion>', Draw_Lines)
    lastx, lasty = event.x, event.y

def Draw_Lines(event):
    global lastx, lasty
    x, y = event.x, event.y
    cv.create_line((lastx, lasty, x, y), width=8, fill='black', capstyle=ROUND, smooth=TRUE, splinesteps=12)
    lastx, lasty = x, y

def Recognise_Digit():
    global image_number
    predictions = []
    percentage = []
    filename = f'image_{image_number}.png'
    widget = cv
    # grab the canvas area of the window and save it as an image
    x = root.winfo_rootx() + widget.winfo_x()
    y = root.winfo_rooty() + widget.winfo_y()
    x1 = x + widget.winfo_width()
    y1 = y + widget.winfo_height()
    ImageGrab.grab().crop((x, y, x1, y1)).save(filename)
    # read the image back, convert to grayscale and threshold (white digit on black)
    image = cv2.imread(filename, cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, th = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # each drawn digit becomes a separate external contour
    contours = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 1)
        # pad the crop by 5% on each side, resize to 28x28 and normalise for the model
        top = int(0.05 * th.shape[0])
        bottom = top
        left = int(0.05 * th.shape[1])
        right = left
        th_up = cv2.copyMakeBorder(th, top, bottom, left, right, cv2.BORDER_REPLICATE)
        roi = th[y - top:y + h + bottom, x - left:x + w + right]
        img = cv2.resize(roi, (28, 28), interpolation=cv2.INTER_AREA)
        img = img.reshape(1, 28, 28, 1)
        img = img / 255.0
        # predict the digit and annotate the image with the label and confidence
        pred = model.predict([img])[0]
        final_pred = np.argmax(pred)
        data = str(final_pred) + ' ' + str(int(max(pred) * 100)) + '%'
        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 0.5
        color = (255, 0, 0)
        thickness = 1
        cv2.putText(image, data, (x, y - 5), font, fontScale, color, thickness)
    cv2.imshow('image', image)
    cv2.waitKey(0)

# Load model
from keras.models import load_model
model = load_model(r'D:\University\AI\Project\model.h5')

# Create a main window
root = Tk()
root.resizable(0, 0)
root.title("Digit Recognition")

# Initialise variables
lastx, lasty = None, None
image_number = 0

# Create canvas
cv = Canvas(root, width=640, height=480, bg='white')
cv.grid(row=0, column=0, pady=2, sticky=W, columnspan=2)

# Deal with events
cv.bind('<Button-1>', Activate_Event)

# Add buttons
btn_save = Button(text="Recognise Digit", command=Recognise_Digit)
btn_save.grid(row=2, column=0, pady=1, padx=1)
btn_clear = Button(text="Clear", command=Clear_Widget)
btn_clear.grid(row=2, column=1, pady=1, padx=1)

root.mainloop()
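For checking the saved model without launching the GUI, a minimal headless sketch is shown below. It assumes a local copy of model.h5 is reachable and that TensorFlow/Keras is installed; the reshape and 0-1 scaling mirror the preprocessing in Recognise_Digit.

# quick headless sanity check of the saved model against one MNIST test image
import numpy as np
from keras.models import load_model
from tensorflow.keras.datasets import mnist

model = load_model('model.h5')  # assumed local path; the GUI above uses an absolute Windows path
(_, _), (testX, testY) = mnist.load_data()
img = testX[0].reshape(1, 28, 28, 1).astype('float32') / 255.0
pred = model.predict(img)[0]
print('predicted %d (%d%%), actual %d' % (np.argmax(pred), int(max(pred) * 100), testY[0]))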