@@ -42,9 +42,9 @@ train_labels = np.repeat(k,250)[:,np.newaxis]
 test_labels = train_labels.copy()

 # Initiate kNN, train the data, then test it with test data for k=1
-knn = cv2.KNearest()
-knn.train(train,train_labels)
-ret,result,neighbours,dist = knn.find_nearest(test,k=5)
+knn = cv2.ml.KNearest_create()
+knn.train(train, cv2.ml.ROW_SAMPLE, train_labels)
+ret,result,neighbours,dist = knn.findNearest(test,k=5)

 # Now we check the accuracy of classification
 # For that, compare the result with test_labels and check which are wrong
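
For context: this hunk only migrates the tutorial's kNN calls to the cv2.ml module API introduced in OpenCV 3.x; the accuracy check mentioned in the trailing comments sits in the unchanged lines just below the hunk. A minimal standalone sketch of the same flow with the new API, using hypothetical random data in place of the tutorial's digits.png cells (only the shapes and dtypes mirror the tutorial):

    import cv2
    import numpy as np

    # Stand-in for the tutorial's 20x20 digit cells: 500 rows of 400 float32
    # features, labels 0-9 repeated 50 times each (hypothetical random data).
    train = np.random.randint(0, 256, (500, 400)).astype(np.float32)
    test = np.random.randint(0, 256, (500, 400)).astype(np.float32)
    train_labels = np.repeat(np.arange(10), 50)[:, np.newaxis].astype(np.float32)
    test_labels = train_labels.copy()

    # New-style API: create, train with an explicit sample layout, then query.
    knn = cv2.ml.KNearest_create()
    knn.train(train, cv2.ml.ROW_SAMPLE, train_labels)
    ret, result, neighbours, dist = knn.findNearest(test, k=5)

    # Accuracy check described by the comments at the end of the hunk.
    matches = result == test_labels
    correct = np.count_nonzero(matches)
    accuracy = correct * 100.0 / result.size
    print(accuracy)
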
@@ -103,9 +103,9 @@ responses, trainData = np.hsplit(train,[1])
 labels, testData = np.hsplit(test,[1])

 # Initiate the kNN, classify, measure accuracy.
-knn = cv2.KNearest()
-knn.train(trainData, responses)
-ret, result, neighbours, dist = knn.find_nearest(testData, k=5)
+knn = cv2.ml.KNearest_create()
+knn.train(trainData, cv2.ml.ROW_SAMPLE, responses)
+ret, result, neighbours, dist = knn.findNearest(testData, k=5)

 correct = np.count_nonzero(result == labels)
 accuracy = correct*100.0/10000
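
The second hunk relies on the label sitting in column 0 of every row before np.hsplit splits it off. A tiny sketch with made-up values (not the tutorial's letter data) of what that split produces:

    import numpy as np

    # Two hypothetical rows: column 0 is the label, the rest are features.
    data = np.array([[1, 10, 20, 30],
                     [2, 40, 50, 60]], dtype=np.float32)

    # Split after column 0, exactly as the hunk does with train and test:
    # the first piece is the (n, 1) label column, the second is the features.
    labels, features = np.hsplit(data, [1])
    print(labels.shape, features.shape)  # (2, 1) (2, 3)
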