Skip to content

Commit

Permalink
improve transform-seg
Browse files Browse the repository at this point in the history
add simple crop methods
add minRect to recognize mask
  • Loading branch information
X-Chen97 committed Apr 1, 2022
1 parent e62dd65 commit 29a1c3d
Show file tree
Hide file tree
Showing 3 changed files with 118 additions and 42 deletions.
11 changes: 11 additions & 0 deletions pv_vision/transform_seg/cell_crop.py
Expand Up @@ -250,6 +250,17 @@ def detect_peaks(split, direction, cell_size, busbar, thre=0.9, interval=None, m
return peaks


def plot_peaks(n, image, cell_size, busbar, split=None, split_size=None, direction=0, thre=0.9, interval=None, margin=None):
    """Plot the normalized intensity profile of one split together with the peaks detected on it.

    Useful for visually checking the thresholds/intervals used by `detect_peaks`.

    Parameters
    ----------
    n: int
        Index of the split (from `split_img`) to inspect.
    image: array
        Image to be split and analyzed.
    cell_size: int
        Cell size, forwarded to `detect_peaks`.
    busbar: int
        Number of busbars, forwarded to `detect_peaks`.
    split, split_size: int or None
        Number of splits / size of each split, forwarded to `split_img`.
    direction: int
        Axis along which pixel values are summed (0 or 1).
    thre: float
        Normalized-intensity threshold; values above it are clipped to 1.
    interval, margin:
        Forwarded to `detect_peaks`.

    Returns
    -------
    None. Draws onto the current matplotlib axes.
    """
    splits = split_img(image, split, split_size, direction)
    # NOTE: use a fresh name instead of rebinding the `split` parameter,
    # which the original code shadowed after this point.
    chosen = splits[n]
    profile = np.sum(chosen, axis=direction)
    profile = profile / np.max(profile)
    # Saturate everything above the threshold so plateaus read as flat tops,
    # mirroring the preprocessing done inside detect_peaks.
    profile[profile > thre] = 1
    peaks = detect_peaks(chosen, direction, cell_size, busbar, thre, interval, margin)
    # plt.plot(y) plots against indices 0..len-1, same as the explicit range.
    plt.plot(profile)
    plt.scatter(peaks, profile[peaks])


def detect_vertical_lines(image_thre, column, cell_size, thre=0.8, split=100, peak_interval=None):
""" Detect vertical edges by segmenting image into horizontal splits
Expand Down
96 changes: 81 additions & 15 deletions pv_vision/transform_seg/perspective_transform.py
Expand Up @@ -72,7 +72,38 @@ def base64_2_mask(s):
return mask


def load_mask(path, image, mask_name='module_unet'):
def has_mask(mask_name, path=None, data=None):
    """Look up a mask annotation by class title.

    Parameters
    ----------
    mask_name: str
        The annotation name (``classTitle``) of the mask.
    path: str or pathlib.PosixPath
        The path of the annotation json file.
    data: dict
        Parsed annotation data; if provided, ``path`` is still preferred
        when both are given (matching the original behavior).

    Returns
    -------
    int or bool
        Index of the first matching entry in ``data["objects"]`` if found,
        otherwise ``False``. Callers must test with ``is not False`` since
        index 0 is falsy.

    Raises
    ------
    ValueError
        If neither ``path`` nor ``data`` is supplied.
    """
    if path is None and data is None:
        raise ValueError("Mask file not provided.")
    if path:
        with open(path, 'r') as file:
            data = json.load(file)

    hits = (inx for inx, obj in enumerate(data["objects"])
            if obj['classTitle'] == mask_name)
    first = next(hits, None)
    return False if first is None else first


def load_mask(path, image, mask_name='module_unet', center=True):
"""Load the image of mask
Parameters
Expand All @@ -86,6 +117,9 @@ def load_mask(path, image, mask_name='module_unet'):
mask_name: str
The annotation name of the mask. Default is 'module_unet'.
center: bool
If True, return mask center.
Returns
-------
mask: array
Expand All @@ -96,14 +130,27 @@ def load_mask(path, image, mask_name='module_unet'):
"""
with open(path, 'r') as file:
data = json.load(file)
if len(data["objects"]) == 1:
code = data["objects"][0]["bitmap"]["data"]
origin = data["objects"][0]["bitmap"]["origin"]
# if len(data["objects"]) == 0:
# return None
# code = data["objects"][0]["bitmap"]["data"]
# origin = data["objects"][0]["bitmap"]["origin"]
# else:
# flag = True
# for obj in data["objects"]:
# if obj['classTitle'] == mask_name:
inx = has_mask(mask_name, data=data)
if inx is not False:
obj = data["objects"][inx]
code = obj["bitmap"]["data"]
origin = obj["bitmap"]["origin"]
else:
for obj in data["objects"]:
if obj['classTitle'] == mask_name:
code = obj["bitmap"]["data"]
origin = obj["bitmap"]["origin"]
mask = np.zeros((image.shape[0], image.shape[1]))
mask = mask.astype('uint8')
mask_center = np.array([mask.shape[1]/2, mask.shape[0]/2])
if center:
return mask, mask_center
else:
return mask
mask = base64_2_mask(code)
mask_center = np.array([mask.shape[1]/2, mask.shape[0]/2])
mask_center += origin
Expand All @@ -117,7 +164,10 @@ def load_mask(path, image, mask_name='module_unet'):
right = np.zeros((mask4.shape[0], image.shape[1] - mask4.shape[1]))
mask5 = np.hstack((mask4, right))

return mask5.astype('uint8'), mask_center.astype(int)
if center:
return mask5.astype('uint8'), mask_center.astype(int)
else:
return mask5.astype('uint8')


def find_intersection(mask_part, houghlinePara=50):
Expand Down Expand Up @@ -281,7 +331,7 @@ def find_module_corner(mask, mask_center, dist=200, displace=0, method=0, corner
return np.array(corners_order)


def perspective_transform(image, src, sizex, sizey):
def perspective_transform(image, src, sizex, sizey, rotate=True):
"""Do perspective transform on the solar modules. Orientation of the input module is auto-detected. The output
module has short side vertically arranged and long side horizontally arranged.
Expand All @@ -296,16 +346,24 @@ def perspective_transform(image, src, sizex, sizey):
sizex, sizey: int
size of the output image. x is the long side and y is the short side.
rotate: bool
If true, auto-detection of orientation is on.
Returns
-------
warped: array
Transformed image of solar module
"""
src = np.float32(src)
if np.sum((src[0] - src[2])**2) <= np.sum((src[0] - src[1])**2):
dst = np.float32([(0, 0), (sizex, 0), (0, sizey), (sizex, sizey)])
else:

if rotate and np.sum((src[0] - src[2])**2) > np.sum((src[0] - src[1])**2):
dst = np.float32([(0, sizey), (0, 0), (sizex, sizey), (sizex, 0)])
else:
dst = np.float32([(0, 0), (sizex, 0), (0, sizey), (sizex, sizey)])
#if np.sum((src[0] - src[2])**2) <= np.sum((src[0] - src[1])**2):
# dst = np.float32([(0, 0), (sizex, 0), (0, sizey), (sizex, sizey)])
#else:

M = cv.getPerspectiveTransform(src, dst)

warped = cv.warpPerspective(image, M, (sizex, sizey))
Expand Down Expand Up @@ -441,6 +499,7 @@ def find_module_corner2(mask, mode=0):
mode == 1: detect corners of the approximated convex of module
mode == 2: detect corners of the approximated contour of the module
mode == 3: detect corners of the blurred mask of the module
mode == 4: detect corners using minAreaRect (rotated bounding box)
Returns
-------
Expand All @@ -457,6 +516,13 @@ def find_module_corner2(mask, mode=0):
length = len(cnt)
inx = i

if mode == 4:
rect = cv.minAreaRect(contours[inx])
corners = cv.boxPoints(rect)
corners_sorted = sort_corners(np.array(corners))
corners_displaced = np.array([[-1, -1], [1, -1], [-1, 1], [1, 1]]) * 3 + corners_sorted
return corners_displaced

cnt_approx = cv.approxPolyDP(contours[inx], 8, True)
convex = cv.convexHull(contours[inx])
conv_approx = cv.approxPolyDP(convex, 8, True)
Expand All @@ -473,7 +539,7 @@ def find_module_corner2(mask, mode=0):
elif mode == 3:
corners = find_polygon_corners(blur)
else:
print("mode must be one of 0, 1, 2")
return
raise Exception("mode must be one of 0, 1, 2, 3, 4")

return corners

53 changes: 26 additions & 27 deletions pv_vision/transform_seg/solarmodule.py
Expand Up @@ -370,7 +370,7 @@ def corner_detection_cont(self, mode=0, output=False):
if output:
return self._corners

def transform(self, width=None, height=None, cellsize=None, img_only=True):
def transform(self, width=None, height=None, cellsize=None, auto_rotate=True, img_only=True):
"""Do perspective transform on the solar module
Parameters
Expand All @@ -381,6 +381,9 @@ def transform(self, width=None, height=None, cellsize=None, img_only=True):
cellsize: int
Edge length of a cell
auto_rotate: bool
If true, automatically adjust module orientation such that shorter side is vertically aligned.
img_only: bool
If true, only return the image of transformed module.
Otherwise return a transformed module instance
Expand All @@ -398,8 +401,8 @@ def transform(self, width=None, height=None, cellsize=None, img_only=True):
if cellsize:
width = self.col * cellsize
height = self.row * cellsize
wrap = transform.perspective_transform(self._image, self._corners, width, height)
self._transformed = wrap
wrap = transform.perspective_transform(self._image, self._corners, width, height, rotate=auto_rotate)
self._transformed = TransformedModule(wrap, self._row, self._col, self._busbar)
if img_only:
return wrap
else:
Expand All @@ -420,25 +423,15 @@ def is_transformed(self, x_min, y_min):
-------
bool
"""
res = TransformedModule(self._transformed, self._row,
self._col, self.busbar).is_transformed(x_min, y_min)
res = self._transformed.is_transformed(x_min, y_min)
return res

def crop_cell(self, cellsize, vl_interval=None, vl_split_size=None,
def crop_cell(self, cellsize, simple=False, vl_interval=None, vl_split_size=None,
hl_interval=None, hl_split_size=None, margin=None):
vinx_split, vline_split = seg.detect_edge(self._transformed, row_col=[self.row, self.col], cell_size=cellsize,
busbar=self.busbar, peaks_on=0, split_size=vl_split_size,
peak_interval=vl_interval, margin=margin)
vline_abs = seg.linear_regression(vinx_split, vline_split)
hinx_split, hline_split = seg.detect_edge(self._transformed, row_col=[self.row, self.col], cell_size=cellsize,
busbar=self.busbar, peaks_on=1, split_size=hl_split_size,
peak_interval=hl_interval, margin=margin)
hline_abs = seg.linear_regression(hinx_split, hline_split)
cells = self._transformed.crop_cell(cellsize, simple, vl_interval, vl_split_size,
hl_interval, hl_split_size, margin)

hline_abs_couple = seg.couple_edges(hline_abs, length=self.size[1])
vline_abs_couple = seg.couple_edges(vline_abs, length=self.size[0])

return np.array(seg.segment_cell(self._transformed, hline_abs_couple, vline_abs_couple, cellsize=cellsize))
return cells


class TransformedModule(AbstractModule):
Expand Down Expand Up @@ -468,16 +461,22 @@ def is_transformed(self, x_min, y_min):
peak_x, peak_y = transform.find_inner_edge(self._image)
return (len(peak_x) >= x_min) and (len(peak_y) >= y_min)

def crop_cell(self, cellsize, vl_interval=None, vl_split_size=None,
def crop_cell(self, cellsize, simple=False, vl_interval=None, vl_split_size=None,
hl_interval=None, hl_split_size=None, margin=None):
vinx_split, vline_split = seg.detect_edge(self._image, row_col=[self.row, self.col], cell_size=cellsize,
busbar=self.busbar, peaks_on=0, split_size=vl_split_size,
peak_interval=vl_interval, margin=margin)
vline_abs = seg.linear_regression(vinx_split, vline_split)
hinx_split, hline_split = seg.detect_edge(self._image, row_col=[self.row, self.col], cell_size=cellsize,
busbar=self.busbar, peaks_on=1, split_size=hl_split_size,
peak_interval=hl_interval, margin=margin)
hline_abs = seg.linear_regression(hinx_split, hline_split)
if simple:
vline_abs = list(zip(np.zeros(self.col - 1),
np.linspace(0, self.size[0], self.col + 1)[1: -1].astype(int)))
hline_abs = list(zip(np.zeros(self.row - 1),
np.linspace(0, self.size[1], self.row + 1)[1: -1].astype(int)))
else:
vinx_split, vline_split = seg.detect_edge(self._image, row_col=[self.row, self.col], cell_size=cellsize,
busbar=self.busbar, peaks_on=0, split_size=vl_split_size,
peak_interval=vl_interval, margin=margin)
vline_abs = seg.linear_regression(vinx_split, vline_split)
hinx_split, hline_split = seg.detect_edge(self._image, row_col=[self.row, self.col], cell_size=cellsize,
busbar=self.busbar, peaks_on=1, split_size=hl_split_size,
peak_interval=hl_interval, margin=margin)
hline_abs = seg.linear_regression(hinx_split, hline_split)

hline_abs_couple = seg.couple_edges(hline_abs, length=self.size[1])
vline_abs_couple = seg.couple_edges(vline_abs, length=self.size[0])
Expand Down

0 comments on commit 29a1c3d

Please sign in to comment.