0

I have trained a Keras CNN model and can classify multiple handwritten digits. It works by finding a bounding box around each digit, cropping it, and resizing the crop to 45x45 (the training dataset size) — but the bounding boxes come back in no specific order. I want to process them sequentially from left to right.

Image-Contours

from tensorflow.keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt

# Read the input image, keep an untouched copy for drawing boxes on later,
# and build a grayscale version for thresholding / contour detection.
img = cv2.imread("1+2.png")
img_copy = img.copy()
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Quick visual sanity check of the loaded image.
plt.imshow(img, cmap='gray')
plt.show()

# Binarize with Otsu's method (threshold value 20 is ignored when OTSU is
# set); INV makes the digits white on a black background, which is what
# cv2.findContours expects.
thresh, img_bin = cv2.threshold(
    img_gray, 20, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU
)
plt.imshow(img_bin, cmap='gray')
plt.title(f'Threshold: {thresh}')
plt.show()

# Flood-fill from the top-left corner to clear any border artifact that the
# inverted threshold may have turned white.
cv2.floodFill(img_bin, None, (0, 0), 0)
plt.imshow(img_bin)

# Find the outer contours (one blob per digit) on the binarized image.
# NOTE: cv2.findContours returns contours in an unspecified order, which is
# why the predictions came out in the wrong sequence. Sorting by the
# bounding-box x coordinate gives a deterministic left-to-right order.
contours, hierarchy = cv2.findContours(img_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=lambda c: cv2.boundingRect(c)[0])

from google.colab.patches import cv2_imshow
from keras.preprocessing import image
import numpy as np

predictions = []   # decoded digit labels, in processing order
current_crop = []  # cropped digit images, in processing order

# Minimum bounding-box area: boxes at or below this are treated as noise.
# Hoisted out of the loop — it is a constant, not per-contour state.
MIN_AREA = 40

# Iterate the contours sorted by their bounding-box x coordinate so the
# digits are classified left-to-right (findContours gives no ordering
# guarantee on its own).
for c in sorted(contours, key=lambda c: cv2.boundingRect(c)[0]):

    # Bounding rectangle of the current contour: (x, y, width, height).
    rectX, rectY, rectWidth, rectHeight = cv2.boundingRect(c)

    # Guard clause: skip tiny blobs (noise) early instead of nesting.
    if rectWidth * rectHeight <= MIN_AREA:
        continue

    # Draw the bounding box on the display copy (magenta, 1 px).
    color = (255, 0, 255)
    cv2.rectangle(img_copy, (int(rectX), int(rectY)),
                  (int(rectX + rectWidth), int(rectY + rectHeight)), color, 1)

    # Crop the digit from the original (colour) image:
    currentCrop = img[rectY:rectY + rectHeight, rectX:rectX + rectWidth]
    current_crop.append(currentCrop)

    # Resize to the network's 45x45 training input size:
    test_image = cv2.resize(currentCrop, (45, 45), interpolation=cv2.INTER_AREA)

    # Convert to an array and add a leading batch dimension for predict():
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)

    # Classify and decode the label.
    # NOTE(review): global_model and lb must be defined earlier in the
    # notebook (trained model and its label binarizer).
    result = lb.inverse_transform(global_model.predict(test_image))
    result1 = "".join(str(e) for e in result)
    predictions.append(result1)
    print(predictions)

    # Show the crop and the running annotated image:
    cv2_imshow(currentCrop)
    plt.imshow(img_copy, cmap='gray')
    plt.show()

    cv2.waitKey(0)

The contours are not returned in any specific order.

0 Answers0