filmov
tv
Handwritten digit recognition based on CNN (Python)

Show description
Google Colab
Loading Dataset from Google Drive
Code:
import tensorflow as tf
import numpy as np
import time
sess = tf.InteractiveSession()
# test
x_train = input_data['train_x'].astype('float32')/255
y_train = input_data['train_y'].astype('float32')
x_test = input_data['test_x'].astype('float32')/255
y_test = input_data['test_y'].astype('float32')
# declaration
def weight_variable(shape):
return tf.Variable(initial)
def bias_variable(shape):
return tf.Variable(initial)
def conv2d(x, w):
def max_pool_2x2(x):
# input&output
# CNN structure
Featuremaps1=6;
Featuremaps2=12;
W_conv1 = weight_variable([5, 5, 1, Featuremaps1]) # Feature maps = 6
b_conv1 = bias_variable([Featuremaps1])
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, Featuremaps1, Featuremaps2]) # Feature maps = 12
b_conv2 = bias_variable([Featuremaps2])
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7*7*Featuremaps2, 10])
b_fc1 = bias_variable([10])
# loss function
# test
# train on minibatches
batch_size=50
numbatches=(60000//batch_size)
num_epochs=25
for epoch in range(num_epochs):
jj=0
for i in range(numbatches):
jj=jj+batch_size;
ii= kk[batch_size*i:jj]
if i % 100 == 0:
ch_size*(i+1)].reshape(batch_size, 10)})
print("step %d, training accuracy %g" % (i, train_accuracy))
)].reshape(batch_size, 10)})
print("Epoch "+str(epoch+1)+" completed : Time usage "+str(int(end_time-start_time))+" seconds")
Loading Dataset from Google Drive
Code:
import tensorflow as tf
import numpy as np
import time
sess = tf.InteractiveSession()
# test
x_train = input_data['train_x'].astype('float32')/255
y_train = input_data['train_y'].astype('float32')
x_test = input_data['test_x'].astype('float32')/255
y_test = input_data['test_y'].astype('float32')
# declaration
def weight_variable(shape):
return tf.Variable(initial)
def bias_variable(shape):
return tf.Variable(initial)
def conv2d(x, w):
def max_pool_2x2(x):
# input&output
# CNN structure
Featuremaps1=6;
Featuremaps2=12;
W_conv1 = weight_variable([5, 5, 1, Featuremaps1]) # Feature maps = 6
b_conv1 = bias_variable([Featuremaps1])
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, Featuremaps1, Featuremaps2]) # Feature maps = 12
b_conv2 = bias_variable([Featuremaps2])
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7*7*Featuremaps2, 10])
b_fc1 = bias_variable([10])
# loss function
# test
# train on minibatches
batch_size=50
numbatches=(60000//batch_size)
num_epochs=25
for epoch in range(num_epochs):
jj=0
for i in range(numbatches):
jj=jj+batch_size;
ii= kk[batch_size*i:jj]
if i % 100 == 0:
ch_size*(i+1)].reshape(batch_size, 10)})
print("step %d, training accuracy %g" % (i, train_accuracy))
)].reshape(batch_size, 10)})
print("Epoch "+str(epoch+1)+" completed : Time usage "+str(int(end_time-start_time))+" seconds")