# -*- coding: utf-8 -*-
import os
import time
import numpy as np
import cv2
from utils.data_augment_1114 import rotate_3
class Point(object):
    # Simple 2D point
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
class Line(object):
    # Line defined by two end points
    def __init__(self, x1, y1, x2, y2):
        self.p1 = Point(x1, y1)
        self.p2 = Point(x2, y2)
def GetLinePara(line):
    # Coefficients of the implicit line equation a*x + b*y + c = 0 through p1 and p2
    line.a = line.p1.y - line.p2.y
    line.b = line.p2.x - line.p1.x
    line.c = line.p1.x * line.p2.y - line.p2.x * line.p1.y
def GetCrossPoint(l1, l2):
    # Intersection of two lines in implicit form, solved with Cramer's rule
    GetLinePara(l1)
    GetLinePara(l2)
    d = l1.a * l2.b - l2.a * l1.b
    if d == 0:
        # Parallel (or coincident) lines have no unique intersection
        return (-1, -1)
    p = Point()
    p.x = int((l1.b * l2.c - l2.b * l1.c) * 1.0 / d)
    p.y = int((l1.c * l2.a - l2.c * l1.a) * 1.0 / d)
    return (p.x, p.y)
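# Sanity-check example (not part of the pipeline): the lines through (0, 0)-(4, 4)
# and (0, 4)-(4, 0) intersect at (2, 2):
#   GetCrossPoint(Line(0, 0, 4, 4), Line(0, 4, 4, 0))  ->  (2, 2)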
# Holds the candidate corner coordinates of the detected quadrilateral
class Coordinates(object):
    def __init__(self):
        self.coord = []
        self.centroidx = 0
        self.centroidy = 0
        self.corners = []
        self.quad = 4
        self.destination = []
    # Order the corner points as top-left, top-right, bottom-right, bottom-left
    def calculateTRTLBRBL(self):
        toppoints = []
        bottompoints = []
        for tmp_cord in self.coord:
            # each tmp_cord is an (x, y) pair
            if len(tmp_cord) == 2:
                if tmp_cord[1] < self.centroidy:
                    toppoints.append(tmp_cord)
                else:
                    bottompoints.append(tmp_cord)
        if len(toppoints) > 1 and len(bottompoints) > 1:
            # min/max on (x, y) tuples compares x first, i.e. left-most / right-most
            top_left = min(toppoints)
            top_right = max(toppoints)
            bottom_right = max(bottompoints)
            bottom_left = min(bottompoints)
            self.corners.append(top_left)
            self.corners.append(top_right)
            self.corners.append(bottom_right)
            self.corners.append(bottom_left)
        return self.corners
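# Illustrative example (not executed): for corners (10, 10), (90, 10), (90, 90), (10, 90)
# with centroid (50, 50), calculateTRTLBRBL() returns
# [(10, 10), (90, 10), (90, 90), (10, 90)], i.e. TL, TR, BR, BL.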
def rotate(img, angle):
    # Rotate an image about its center by `angle` degrees, with a scale factor
    # that depends on the angle (helper; not used in the pipeline below)
    height = img.shape[0]
    width = img.shape[1]
    if angle % 180 == 0:
        scale = 1
    elif angle % 90 == 0:
        scale = float(max(height, width)) / min(height, width)
    else:
        scale = np.sqrt(height ** 2 + width ** 2) / min(height, width)
    rotateMat = cv2.getRotationMatrix2D((width / 2, height / 2), angle, scale)
    rotateImg = cv2.warpAffine(img, rotateMat, (width, height))
    return rotateImg  # rotated image
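# Example usage (illustrative only; rotate() is not called anywhere in this script):
#   tilted = rotate(some_bgr_image, 15)   # some_bgr_image: any image loaded with cv2.imread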
# path=r"C:\Users\libanggeng\Pictures\yaozong/"
path=r"E:\data\0/"
# path=r"E:\data\error/"
right = 0    # images confirmed correct during manual review
wrong = []   # filenames judged wrong during manual review
total = 0    # number of images actually processed
image_dir = r"E:\data\barcode\train\13_3_new\tmp"
g = os.walk(image_dir)
img_name_list = ['%s\\%s' % (i[0], j) for i in g for j in i[-1]
                 if j.endswith('jpg') or j.endswith('png')]
h_new = 472  # side length of the square output template; also used as the line-extension length
for filename in img_name_list:
img_o = cv2.imread(filename)
    img = img_o.copy()
    ertu = img_o.copy()
    ertu[:] = 0  # black canvas used later to draw the hull outline for the Hough transform
pheight, pwidth = img.shape[:2]
height, width = img.shape[:2]
hrate=1
# height = int(pheight/hrate)
# width=int(pwidth /hrate)
# img = cv2.resize(img,(int(width) , int(height)))
start=time.time()
    gray = cv2.cvtColor(img_o, cv2.COLOR_BGR2GRAY)
    # Binarize with Otsu's threshold
    # ret, binary = cv2.threshold(gray, 125, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # this works best
    # Structuring element for morphology
    k = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # Opening removes small noise before contour detection
    binary = cv2.morphologyEx(binary, cv2.MORPH_OPEN, k)
    # cv2.imshow("binary", binary)
    # Find external contours (OpenCV 4.x returns (contours, hierarchy))
    contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
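    # Each sufficiently large, non-convex contour is treated as a candidate region:
    # its convex hull is rasterized and the quadrilateral edges are recovered from it below.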
    for c in range(len(contours)):
        # Only process contours that are not already convex
        ret = cv2.isContourConvex(contours[c])
        if not ret:
            # Convex hull of the contour
            hulls = cv2.convexHull(contours[c])
            hull_area = cv2.contourArea(hulls)
            # Discard hulls that are too small in absolute or relative terms
            if hull_area < 100 * 100:
                continue
            if hull_area < (0.1 * width * height):
                continue
            # Use a separate name so the global image counter `total` is not overwritten
            hull_count = len(hulls)
            # for i in range(hull_count):
            #     x1, y1 = hulls[i % hull_count][0]
            #     x2, y2 = hulls[(i + 1) % hull_count][0]
            #     # cv2.circle(src2, (x1, y1), 4, (255, 0, 0), 2, 8, 0)
            #     cv2.line(img_o, (x1, y1), (x2, y2), (0, 0, 255), 2, 8, 0)
            # print("convex : ", hull_count)
            if len(hulls) > 3:
                # Centroid of the hull points, used later to sort corners
                cenx = np.mean(hulls, axis=0)
                centroidx = cenx[0][0]
                centroidy = cenx[0][1]
                # Draw the hull outline on the black canvas and detect straight edges on it
                heiimg = cv2.drawContours(ertu, [hulls], 0, (0, 0, 255), 1)
                heiimg = cv2.cvtColor(heiimg, cv2.COLOR_BGR2GRAY)
                lines = cv2.HoughLines(heiimg, 1, np.pi / 180, 40)
                mlines = []
                if lines is not None and len(lines) > 3:
                    print("hull:", len(hulls), "lines:", len(lines))
                    # Collect (x0, y0, theta) for each Hough line: (x0, y0) is the foot of the
                    # perpendicular from the origin, which together with theta is enough for clustering
                    for point in lines:
                        for r, theta in point:
                            vcos = np.cos(theta)
                            vsin = np.sin(theta)
                            x0 = vcos * r
                            y0 = vsin * r
                            mlines.append((x0, y0, theta))
                    # One bucket of supporting end points per edge cluster
                    values = [[] for _ in range(4)]
                    # One slot per quadrilateral corner
                    points = [[] for _ in range(4)]
                    topvalue = []  # per cluster: fitted-line y values at x = 0 and x = width - 1
                    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
                    # cv2.kmeans expects float32 samples
                    compactness, labels, centers = cv2.kmeans(np.float32(mlines), 4, None, criteria, 20, cv2.KMEANS_RANDOM_CENTERS)
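                    # The (x0, y0, theta) samples are grouped into 4 clusters, one per edge of the
                    # quadrilateral; each cluster is reduced to a single line with cv2.fitLine further
                    # down, and the pairwise intersections of those lines give the candidate corners.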
                    # Draw each Hough line in the colour of its cluster and collect its end points
                    colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0), (255, 255, 0)]
                    for point, allocation in zip(lines, labels):
                        for r, theta in point:
                            vcos = np.cos(theta)
                            vsin = np.sin(theta)
                            x0 = vcos * r
                            y0 = vsin * r
                            # Extend the line h_new pixels in both directions
                            x1 = int(x0 + h_new * (-vsin))
                            y1 = int(y0 + h_new * (vcos))
                            x2 = int(x0 - h_new * (-vsin))
                            y2 = int(y0 - h_new * (vcos))
                            # cv2.line(hei, (x1, y1), (x2, y2), (255, 255, 255), 2)
                            cluster = int(allocation[0])
                            if 0 <= cluster <= 3:
                                cv2.line(img, (x1, y1), (x2, y2), colors[cluster], 1)
                                values[cluster].append([x1, y1])
                                values[cluster].append([x2, y2])
                            else:
                                print(allocation)
                    print(len(labels))
                    for j in range(4):
                        # Fit a single line to each cluster and evaluate it at x = 0 and x = width - 1
                        # (note: a perfectly vertical fit, vx == 0, would divide by zero here)
                        [vx, vy, x, y] = cv2.fitLine(np.float32(values[j]), cv2.DIST_L2, 0, 0.01, 0.01)
                        lefty = int((-x * vy / vx) + y)
                        righty = int(((width - x) * vy / vx) + y)
                        # cv2.line(hei, (width - 1, righty), (0, lefty), (0, 255, 255), 1)
                        topvalue.append([lefty, righty])
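                    # topvalue[j] now describes the fitted line of cluster j by its y values at
                    # x = 0 and x = width - 1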
                    line0 = Line(0, topvalue[0][0], width - 1, topvalue[0][1])
                    line1 = Line(0, topvalue[1][0], width - 1, topvalue[1][1])
                    line2 = Line(0, topvalue[2][0], width - 1, topvalue[2][1])
                    line3 = Line(0, topvalue[3][0], width - 1, topvalue[3][1])
                    # Intersect every pair of fitted lines (6 combinations)
                    pp = []
                    pp.append(GetCrossPoint(line0, line1))
                    pp.append(GetCrossPoint(line0, line2))
                    pp.append(GetCrossPoint(line0, line3))
                    pp.append(GetCrossPoint(line1, line3))
                    pp.append(GetCrossPoint(line1, line2))
                    pp.append(GetCrossPoint(line2, line3))
                    pp = np.array(pp)
                    # Keep only intersections that fall inside (or slightly outside) the image
                    b = pp[np.where((pp[:, 0] >= -width / 3) & (pp[:, 0] <= width) & (pp[:, 1] >= -height / 3) & (pp[:, 1] <= height))]
                    if len(b) != 4:
                        continue
                    # Assign each intersection to a corner slot by its position relative to the hull centroid
                    for x, y in b:
                        if x > centroidx and y > centroidy:
                            points[3] = (x, y)
                        elif x > centroidx and y < centroidy:
                            points[0] = (x, y)
                        elif x < centroidx and y < centroidy:
                            points[1] = (x, y)
                        elif x < centroidx and y > centroidy:
                            points[2] = (x, y)
                    my_coordinate = Coordinates()
                    # Every corner slot must hold exactly one (x, y) pair
                    if any(len(points[j]) != 2 for j in range(4)):
                        # cv2.imshow(filename, img)
                        continue
                    # Centroid of the four corner points
                    cenx = np.mean(np.array(points), axis=0)
                    my_coordinate.centroidx = cenx[0]
                    my_coordinate.centroidy = cenx[1]
                    # Template size: the quadrilateral is mapped onto an h_new x h_new square
                    # coord.destination = np.float32([[0, 0], [pwidth, 0], [pwidth, pheight], [0, pheight]])
                    my_coordinate.destination = np.float32([[0, 0], [h_new, 0], [h_new, h_new], [0, h_new]])
                    # coord.size += 1
                    my_coordinate.coord = points
                    # Actual corner positions, ordered TL, TR, BR, BL
                    corners = np.array(my_coordinate.calculateTRTLBRBL())
                    if len(corners) == 0:
                        continue
                    # Scale back to the original resolution (hrate is 1 unless the image was resized)
                    for aa in corners:
                        aa[0] = aa[0] * hrate
                        aa[1] = aa[1] * hrate
                    for point in corners:
                        cv2.circle(img_o, (point[0], point[1]), 4, (255, 0, 0), 2, 8, 0)
print("time",time.time()-start)
cv2.imshow("dian", img_o)
# # Get perspective transformation parameters
transformationMatrix = cv2.getPerspectiveTransform(corners.astype(np.float32), my_coordinate.destination)
minVal = np.min(my_coordinate.destination[np.nonzero(my_coordinate.destination)])
# print "minVal", minVal, "width", self.shape[0]
maxVal = np.max(my_coordinate.destination[np.nonzero(my_coordinate.destination)])
# print "maxVal", maxVal, "height", self.shape[1]
# Perspective transformation
warpedImage = cv2.warpPerspective(img_o, transformationMatrix,
(h_new, h_new)) # (pwidth/2,pheight/2))#
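                    # warpedImage is the rectified h_new x h_new crop; it is lightly sharpened
                    # below before being shown for inspection.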
                    # warpedImage, transformationMatrix
                    # cv2.imshow("image", warpedImage)
                    # cv2.waitKey(0)
                    # gray = cv2.cvtColor(warpedImage, cv2.COLOR_BGR2GRAY)
                    # Unsharp masking: sharpened = 1.5 * image - 0.5 * blurred
                    blur = cv2.GaussianBlur(warpedImage, (5, 5), 2)
                    alpha = 1.5
                    beta = 1 - alpha  # -0.5
                    gamma = 0
                    sharpened = cv2.addWeighted(warpedImage, alpha, blur, beta, gamma)
                    cv2.imshow("sharpened" + filename, sharpened)
                    cv2.waitKey()
                    # Note: results are poor on some images: some hulls draw diagonals directly,
                    # others only approximate the interior of the code.
                    # display picture
                    # cv2.namedWindow(filename, 0)  # cv2.WINDOW_NORMAL
                    # cv2.imshow(filename, img)
                    total = total + 1
                    # cv2.imshow(x, img)
                    # Manual review: Enter (13) marks the result as correct, anything else as wrong
                    k = cv2.waitKey()
                    if k == ord("n"):
                        wrong.append(filename)
                    elif k == 13:
                        right = right + 1
                    else:
                        wrong.append(filename)
                    cv2.destroyWindow(filename)
                    cv2.destroyWindow("sharpened" + filename)
                    # cv2.destroyWindow("houghlines3.jpg")
print("total:" + str(total) + " right:" + str(right), "rate:",
      str(round(right * 1.0 / total, 2) if total else 0))
print(wrong)
# plt.imshow(img), plt.colorbar(), plt.show()