HaleySchuhl closed this issue 11 months ago
I've made a different implementation of pcv.transform.detect_color_card that has worked well across different scales and rotations of color card images. It detects square contours (the color card chips) much like the existing implementation does, but it then maps the positions of those squares to their actual positions on the color card based on the card's rotation.
```python
import math

import cv2
import numpy as np


def colorCardMask(thisImage, nrows=6, ncols=4):
    def isSquare(contour):
        # Keep contours that are large, roughly square, and mostly filled
        return (cv2.contourArea(contour) > 1000 and
                max(cv2.minAreaRect(contour)[1]) / min(cv2.minAreaRect(contour)[1]) < 1.2 and
                (cv2.contourArea(contour) / np.prod(cv2.minAreaRect(contour)[1])) > 0.8)

    imgray = cv2.cvtColor(thisImage, cv2.COLOR_BGR2GRAY)
    # ret, thresh = cv2.threshold(imgray, int(np.mean([np.median(thisImage), np.median(thisImage)])), 255, cv2.THRESH_BINARY)
    thresh = cv2.adaptiveThreshold(imgray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 127, 2)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    filteredContours = [contour for contour in contours if isSquare(contour)]
    # Keep only contours whose area is close to the median chip area
    targetSquareArea = np.median([cv2.contourArea(filteredContour) for filteredContour in filteredContours])
    filteredContours = [contour for contour in filteredContours if (0.8 < (cv2.contourArea(contour) / targetSquareArea) < 1.2)]
    if len(filteredContours) == 0:
        return [], thisImage
    targetMask = np.zeros(imgray.shape)
    # Minimum-area rectangle around the centers of all detected chip contours
    rect = np.concatenate([[np.array(cv2.minAreaRect(i)[0]).astype(int)] for i in filteredContours])
    rect = cv2.minAreaRect(rect)
    corners = np.int0(cv2.boxPoints(rect))
    # Orient the card: the corner closest to cyan (in BGR) anchors the corner ordering
    cyanIndex = np.argmin([np.mean(math.dist(thisImage[corner[1], corner[0], :], (255, 255, 0))) for corner in corners])
    corners = corners[np.argsort([math.dist(corner, corners[cyanIndex]) for corner in corners])[[0, 1, 3, 2]]]
    increment = 100  # increment amount is arbitrary since cell distances will be rescaled during perspective transform
    centers = [[int(0 + i * increment), int(0 + j * increment)] for j in range(nrows) for i in range(ncols)]
    newRect = cv2.minAreaRect(np.array(centers))
    boxPoints = cv2.boxPoints(newRect).astype("float32")
    # Map the idealized grid of chip centers onto the detected card
    M = cv2.getPerspectiveTransform(boxPoints, corners.astype("float32"))
    newCenters = cv2.transform(np.array([centers]), M)[0][:, 0:2]
    thisSequence = np.array([i for i in range(nrows * ncols)])
    for i in range(len(newCenters)):
        cv2.circle(targetMask, newCenters[i], 15, int(thisSequence[i]) * 10, -1)
    return newCenters, targetMask
```
The following code (which I used with the image below) is a quick way to test how this implementation works with rotations.
```python
import cv2
import numpy as np
from matplotlib import pyplot as plt
from plantcv import plantcv as pcv

image = cv2.imread(<path to image with color card>)
for i in range(5):
    # Rotate by another 60 degrees each iteration and re-run detection
    image = pcv.transform.rotate(image, 60, False)
    plt.figure()
    plt.imshow(np.flip(image, 2))  # flip BGR to RGB for display
    _, mask = colorCardMask(image)
    plt.figure()
    plt.imshow(mask)
```
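Since the claim is that detection also holds up at different scales, a similar quick check can be run by downscaling the same image. This is only a hedged sketch: the image path and scale factors are placeholders, and chips smaller than the 1000-pixel area cutoff in isSquare will be rejected, so detection is expected to degrade at aggressive downscaling.

```python
# Hypothetical scale-robustness check (path and scale factors are placeholders)
image = cv2.imread("<path to image with color card>")
for scale in (1.0, 0.75, 0.5, 0.25):
    # Downscale the image and re-run detection
    small = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
    centers, mask = colorCardMask(small)
    print(f"scale={scale}: {len(centers)} chip centers detected")
```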
Same algorithm but updated syntax.
```python
import cv2
import numpy as np
from plantcv import plantcv as pcv
from plantcv.plantcv._helpers import _cv2_findcontours


def detect_color_card(img, min_size=1000):
    # Temporarily disable debugging for the intermediate PlantCV calls
    debug = pcv.params.debug
    pcv.params.debug = None
    # Convert to grayscale
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Blur
    gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
    # Otsu threshold
    ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Find contours
    cnts, _ = _cv2_findcontours(bin_img=threshold)
    # Initialize a mask
    cc_mask = np.zeros(threshold.shape, dtype=np.uint8)
    # Loop over each contour
    for cnt in cnts:
        # Make a submask
        cnt_mask = np.zeros(threshold.shape, dtype=np.uint8)
        # Check if the contour is similar to a square
        if cv2.matchShapes(cnt,
                           np.array([[0, 0], [1, 0], [1, 1], [0, 1]]),
                           cv2.CONTOURS_MATCH_I2,
                           0.0) < 0.015:
            # Draw the contour on the submask
            cnt_mask = cv2.drawContours(cnt_mask, [cnt], -1, (255), -1)
            # Calculate the size of the submask
            area = np.count_nonzero(cnt_mask)
            if area > min_size:
                # Apply the submask to the color card mask if bigger than the threshold
                cc_mask = pcv.logical_or(cc_mask, cnt_mask)
    # Fit a grid of ROIs over the detected chips and label each one uniquely
    rois = pcv.roi.auto_grid(mask=cc_mask, nrows=4, ncols=6, radius=15)
    out_mask = np.zeros(threshold.shape, dtype=np.uint8)
    for i, roi in enumerate(rois):
        out_mask = cv2.drawContours(out_mask, roi.contours[0], -1, (i + 1), -1)
    pcv.params.debug = debug
    return out_mask
```
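For context on how the returned labeled mask could be consumed downstream, here is a hedged usage sketch. It assumes PlantCV's get_color_matrix API, which averages the pixels under each uniquely labeled chip; the image path is a placeholder.

```python
import cv2
from plantcv import plantcv as pcv

# Hypothetical usage: feed the labeled chip mask into PlantCV's color-matrix utilities
img = cv2.imread("color_card_scene.png")  # placeholder path
chip_mask = detect_color_card(img)
# get_color_matrix returns the chip labels and the mean R, G, B value of each chip
headers, card_matrix = pcv.transform.get_color_matrix(rgb_img=img, mask=chip_mask)
print(headers)
print(card_matrix)
```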
Minor tweaks to suggested algorithm
```python
import math

import cv2
import numpy as np
from matplotlib import pyplot as plt
from plantcv.plantcv import fatal_error


def isSquare(contour):
    # Keep contours that are large, roughly square, and mostly filled
    return (cv2.contourArea(contour) > 1000 and
            max(cv2.minAreaRect(contour)[1]) / min(cv2.minAreaRect(contour)[1]) < 1.2 and
            (cv2.contourArea(contour) / np.prod(cv2.minAreaRect(contour)[1])) > 0.8)


def colorCardMask(rgb_img, nrows=6, ncols=4):
    """Automatically detect a color card.

    Algorithm written by mtwatso2-eng (github). Updated and implemented into PlantCV by Haley Schuhl.

    Inputs:
    rgb_img = Input RGB image data containing a color card.
    nrows   = number of rows of color chips
    ncols   = number of columns of color chips

    Returns:
    newCenters = detected chip center coordinates
    targetMask = labeled mask of chip locations

    :param rgb_img: numpy.ndarray
    :param nrows: int
    :param ncols: int
    :return newCenters: numpy.ndarray
    :return targetMask: numpy.ndarray
    """
    imgray = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
    # ret, thresh = cv2.threshold(imgray, int(np.mean([np.median(thisImage), np.median(thisImage)])), 255, cv2.THRESH_BINARY)
    thresh = cv2.adaptiveThreshold(imgray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 127, 2)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    filteredContours = [contour for contour in contours if isSquare(contour)]
    targetSquareArea = np.median([cv2.contourArea(filteredContour) for filteredContour in filteredContours])
    filteredContours = [contour for contour in filteredContours if (0.8 < (cv2.contourArea(contour) / targetSquareArea) < 1.2)]
    if len(filteredContours) == 0:
        return [], rgb_img
        # fatal_error('No color card found under current parameters')
    targetMask = np.zeros(imgray.shape)
    rect = np.concatenate([[np.array(cv2.minAreaRect(i)[0]).astype(int)] for i in filteredContours])
    rect = cv2.minAreaRect(rect)
    corners = np.int0(cv2.boxPoints(rect))
    cyanIndex = np.argmin([np.mean(math.dist(rgb_img[corner[1], corner[0], :], (255, 255, 0))) for corner in corners])
    whiteIndex = np.argmin([np.mean(math.dist(rgb_img[corner[1], corner[0], :], (255, 255, 255))) for corner in corners])
    print(whiteIndex)  # leftover debug print
    corners = corners[np.argsort([math.dist(corner, corners[whiteIndex]) for corner in corners])[[0, 1, 3, 2]]]
    increment = 100  # increment amount is arbitrary since cell distances will be rescaled during perspective transform
    centers = [[int(0 + i * increment), int(0 + j * increment)] for j in range(nrows) for i in range(ncols)]
    newRect = cv2.minAreaRect(np.array(centers))
    boxPoints = cv2.boxPoints(newRect).astype("float32")
    M = cv2.getPerspectiveTransform(boxPoints, corners.astype("float32"))
    newCenters = cv2.transform(np.array([centers]), M)[0][:, 0:2]
    thisSequence = np.array([i for i in range(nrows * ncols)])
    for i in range(len(newCenters)):
        cv2.circle(targetMask, newCenters[i], 20, int(thisSequence[i] + 1) * 10, -1)  # ^ roi circle size
    return newCenters, targetMask
```
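A quick way to exercise this tweaked version is sketched below; the image path is a placeholder, and everything else follows the function as written above.

```python
import cv2
from matplotlib import pyplot as plt

# Placeholder path; the function returns chip centers ordered relative to the
# white corner chip, plus a mask whose chips are labeled 10, 20, ..., nrows*ncols*10.
img = cv2.imread("color_card_scene.png")
centers, card_mask = colorCardMask(img, nrows=6, ncols=4)
print(len(centers), "chip centers found")
plt.imshow(card_mask)
plt.show()
```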
```python
import math
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt
from plantcv.plantcv import fatal_error
from plantcv.plantcv import params
from plantcv.plantcv._debug import _debug


def isSquare(contour):
    # Keep contours that are large, roughly square, and mostly filled
    return (cv2.contourArea(contour) > 1000 and
            max(cv2.minAreaRect(contour)[1]) / min(cv2.minAreaRect(contour)[1]) < 1.2 and
            (cv2.contourArea(contour) / np.prod(cv2.minAreaRect(contour)[1])) > 0.8)


def colorCardMask(rgb_img, nrows=6, ncols=4):
    """Automatically detect a color card.

    Algorithm written by mtwatso2-eng (github). Updated and implemented into PlantCV by Haley Schuhl.

    Inputs:
    rgb_img = Input RGB image data containing a color card.
    nrows   = number of rows of color chips
    ncols   = number of columns of color chips

    Returns:
    newCenters = detected chip center coordinates
    targetMask = labeled mask of chip locations

    :param rgb_img: numpy.ndarray
    :param nrows: int
    :param ncols: int
    :return newCenters: numpy.ndarray
    :return targetMask: numpy.ndarray
    """
    imgray = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
    # ret, thresh = cv2.threshold(imgray, int(np.mean([np.median(thisImage), np.median(thisImage)])), 255, cv2.THRESH_BINARY)
    thresh = cv2.adaptiveThreshold(imgray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 127, 2)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    filteredContours = [contour for contour in contours if isSquare(contour)]
    targetSquareArea = np.median([cv2.contourArea(filteredContour) for filteredContour in filteredContours])
    filteredContours = [contour for contour in filteredContours if (0.8 < (cv2.contourArea(contour) / targetSquareArea) < 1.2)]
    if len(filteredContours) == 0:
        return [], rgb_img
        # fatal_error('No color card found under current parameters')
    targetMask = np.zeros(imgray.shape)
    rect = np.concatenate([[np.array(cv2.minAreaRect(i)[0]).astype(int)] for i in filteredContours])
    rect = cv2.minAreaRect(rect)
    corners = np.int0(cv2.boxPoints(rect))
    cyanIndex = np.argmin([np.mean(math.dist(rgb_img[corner[1], corner[0], :], (255, 255, 0))) for corner in corners])
    whiteIndex = np.argmin([np.mean(math.dist(rgb_img[corner[1], corner[0], :], (255, 255, 255))) for corner in corners])
    print(whiteIndex)  # leftover debug print
    corners = corners[np.argsort([math.dist(corner, corners[whiteIndex]) for corner in corners])[[0, 1, 3, 2]]]
    increment = 100  # increment amount is arbitrary since cell distances will be rescaled during perspective transform
    centers = [[int(0 + i * increment), int(0 + j * increment)] for j in range(nrows) for i in range(ncols)]
    newRect = cv2.minAreaRect(np.array(centers))
    boxPoints = cv2.boxPoints(newRect).astype("float32")
    M = cv2.getPerspectiveTransform(boxPoints, corners.astype("float32"))
    newCenters = cv2.transform(np.array([centers]), M)[0][:, 0:2]
    thisSequence = np.array([i for i in range(nrows * ncols)])
    debug_img = np.copy(rgb_img)
    for i in range(len(newCenters)):
        cv2.circle(targetMask, newCenters[i], 20, int(thisSequence[i] + 1) * 10, -1)  # ^ roi circle size
        cv2.circle(debug_img, newCenters[i], 20, (255, 255, 0), -1)
    # Debugging
    _debug(visual=debug_img, filename=os.path.join(params.debug_outdir, str(params.device) + '_color_card.png'))
    return newCenters, targetMask
```
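As a hedged usage note for this debug version: the _debug helper keys off pcv.params.debug, so something like the sketch below should plot or save the annotated image. The image path is a placeholder and the function above is assumed to be importable in the current session.

```python
import cv2
from plantcv import plantcv as pcv

# With debug set to "plot" the annotated copy of the input is displayed;
# with "print" it is written to pcv.params.debug_outdir.
pcv.params.debug = "plot"
img = cv2.imread("color_card_scene.png")  # placeholder path
centers, card_mask = colorCardMask(rgb_img=img, nrows=6, ncols=4)
```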
add rough debug img
@all-contributors please add @mtwatso2-eng for code
@nfahlgren
I've put up a pull request to add @mtwatso2-eng! :tada:
Is your feature request related to a problem? Please describe.
Our pcv.transform.find_color_card function has proven useful in the majority of cases, but it has some real limitations. A tilted card can produce a false positive in which the color card is reported as successfully located, and identification can fail outright if the color card makes up too small a proportion of the image, or simply depending on lighting.

Describe the solution you'd like
A more robust color card detection function that tolerates card rotation, scale, and lighting differences.