OpenCV 3.0.0 added the ability to specify a mask when performing matchTemplate. When I specify a mask I get this error: error: (-215) (depth == CV_8U || depth == CV_32F) && type == _templ.type() && _img.dims() <= 2 in function matchTemplateMask
Template image (PNG with transparency):
Source image:
Code
# read the template emoji with the alpha channel
template = cv2.imread(imagePath, cv2.IMREAD_UNCHANGED)
channels = cv2.split(template)
zero_channel = np.zeros_like(channels[0])
mask = np.array(channels[3])
# all elements in alpha_channel that have value 0 are set to 1 in the mask matrix
mask[channels[3] == 0] = 1
# all elements in alpha_channel that have value 100 are set to 0 in the mask matrix
mask[channels[3] == 100] = 0
transparent_mask = cv2.merge([zero_channel, zero_channel, zero_channel, mask])
print image.shape, image.dtype # (72, 232, 3) uint8
print template.shape, template.dtype # (40, 40, 4) uint8
print transparent_mask.shape, transparent_mask.dtype # (40, 40, 4) uint8
# find the matches
res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED, mask=transparent_mask)
Is something wrong with the image type? I am unable to find any examples (in Python) using the new mask parameter of the matchTemplate method. Does anyone know how to create the mask?
I was able to get this to work using Python 2.7.13 and opencv-python==3.1.0.4. Here is the code for it.
import cv2
import numpy as np
import sys
if len(sys.argv) < 3:
    print 'Usage: python match.py <template.png> <image.png>'
    sys.exit()
template_path = sys.argv[1]
template = cv2.imread(template_path, cv2.IMREAD_UNCHANGED)
channels = cv2.split(template)
zero_channel = np.zeros_like(channels[0])
mask = np.array(channels[3])
image_path = sys.argv[2]
image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
mask[channels[3] == 0] = 1
mask[channels[3] == 100] = 0
# transparent_mask = None
# According to http://www.devsplanet.com/question/35658323, we can only use
# cv2.TM_SQDIFF or cv2.TM_CCORR_NORMED
# All methods can be seen here:
# http://docs.opencv.org/2.4/doc/tutorials/imgproc/histograms/template_matching/template_matching.html#which-are-the-matching-methods-available-in-opencv
method = cv2.TM_SQDIFF # R(x,y) = \sum _{x',y'} (T(x',y')-I(x+x',y+y'))^2 (essentially, sum of squared differences)
transparent_mask = cv2.merge([zero_channel, zero_channel, zero_channel, mask])
result = cv2.matchTemplate(image, template, method, mask=transparent_mask)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
print 'Lowest squared difference WITH mask', min_val
# Now we'll try it without the mask (should give a much larger error)
transparent_mask = None
result = cv2.matchTemplate(image, template, method, mask=transparent_mask)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
print 'Lowest squared difference WITHOUT mask', min_val
Here it is as a gist.
Essentially, you need to make sure you're using the right matching method.
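As a follow-up, here is a minimal sketch of how the TM_SQDIFF minimum could be turned into a bounding box on the source image. It re-runs the masked matching so it can simply be appended after the script above; the output file name match_debug.png is just an example.
# Sketch only: locate the best masked TM_SQDIFF match and draw its bounding box.
# Re-uses image, template, zero_channel and mask exactly as built in the script above.
transparent_mask = cv2.merge([zero_channel, zero_channel, zero_channel, mask])
res = cv2.matchTemplate(image, template, cv2.TM_SQDIFF, mask=transparent_mask)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
h, w = template.shape[:2]
top_left = min_loc                     # for TM_SQDIFF the best match is the minimum
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(image, top_left, bottom_right, (0, 0, 255), 2)
cv2.imwrite('match_debug.png', image)  # example output file name
print 'Best match top-left corner:', min_loc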
My environment uses OpenCV 3.1.0 and Python 2.7.11.
Here is code that looks for a template image inside another image, where the template uses transparency (an alpha channel). I hope this helps.
import cv2
import numpy as np
import logging
import time
from ConfigParser import SafeConfigParser

def getMultiFullInfo(all_matches, w, h):
    #This function rearranges the data and calculates the tuples
    # for the square, the center and the tolerance of each point
    result = []
    for match in all_matches:
        tlx = match[0]
        tly = match[1]
        top_left = (tlx, tly)
        brx = match[0] + w
        bry = match[1] + h
        bottom_right = (brx, bry)
        centerx = match[0] + w/2
        centery = match[1] + h/2
        center = [centerx, centery]
        result.append({'top_left': top_left, 'bottom_right': bottom_right, 'center': center, 'tolerance': match[2]})
    return result
def getMulti(res, tolerance, w, h):
    #We get an opencv image in the form of a numpy array and we need to
    # find all the occurrences in there, knowing that 2 squares cannot intersect
    #This will give us exactly the matches that are unique
    #First we need to get all the points where the value is >= tolerance
    #This will sometimes return squares that differ only by a few pixels and that overlap
    all_matches_full = np.where(res >= tolerance)
    logging.debug('*************Start of getMulti function')
    logging.debug('All >= tolerance')
    logging.debug(all_matches_full)
    #Now we need to arrange it in x,y coordinates
    all_matches_coords = []
    for pt in zip(*all_matches_full[::-1]):
        all_matches_coords.append([pt[0], pt[1], res[pt[1]][pt[0]]])
    logging.debug('In coords form')
    logging.debug(all_matches_coords)
    #Let's sort the new array
    all_matches_coords = sorted(all_matches_coords)
    logging.debug('Sorted')
    logging.debug(all_matches_coords)
    #This function is called only when there is at least one match, so if matchTemplate returned something
    # we have found at least one record and we can prepare the analysis and loop through each record
    all_matches = [[all_matches_coords[0][0], all_matches_coords[0][1], all_matches_coords[0][2]]]
    i = 1
    for pt in all_matches_coords:
        found_in_existing = False
        logging.debug('%s)', i)
        for match in all_matches:
            logging.debug(match)
            #This test makes sure that the square we analyse doesn't overlap with one of the squares already found
            if pt[0] >= (match[0]-w) and pt[0] <= (match[0]+w) and pt[1] >= (match[1]-h) and pt[1] <= (match[1]+h):
                found_in_existing = True
                if pt[2] > match[2]:
                    match[0] = pt[0]
                    match[1] = pt[1]
                    match[2] = res[pt[1]][pt[0]]
        if not found_in_existing:
            all_matches.append([pt[0], pt[1], res[pt[1]][pt[0]]])
        i += 1
    logging.debug('Final')
    logging.debug(all_matches)
    logging.debug('Final with all info')
    #Before returning the result, we arrange it so the data is easily accessible
    all_matches = getMultiFullInfo(all_matches, w, h)
    logging.debug(all_matches)
    logging.debug('*************End of getMulti function')
    return all_matches
def checkPicture(screenshot, templateFile, tolerance, multiple=False):
    #This is an intermediary function so that the actual search function doesn't need too many specific arguments
    #We open the config file
    configFile = 'test.cfg'
    config = SafeConfigParser()
    config.read(configFile)
    basepics_dir = config.get('general', 'basepics_dir')
    debug_dir = config.get('general', 'debug_dir')
    font = cv2.FONT_HERSHEY_PLAIN
    #The value -1 means we keep the file as is, i.e. with color and alpha channel if any
    # (0 would mean grayscale and 1 color)
    template = cv2.imread(basepics_dir + templateFile, -1)
    #Now we search in the picture
    result = findPicture(screenshot, template, tolerance, multiple)
    #If it didn't get any result, we log the best value
    if not result['res']:
        logging.debug('Best value found for %s is: %f', templateFile, result['best_val'])
    elif logging.getLogger().getEffectiveLevel() == 10:
        screenshot_with_rectangle = screenshot.copy()
        for pt in result['points']:
            cv2.rectangle(screenshot_with_rectangle, pt['top_left'], pt['bottom_right'], 255, 2)
            fileName_top_left = (pt['top_left'][0], pt['top_left'][1]-10)
            cv2.putText(screenshot_with_rectangle, str(pt['tolerance'])[:4], fileName_top_left, font, 1, (255,255,255), 2)
        #Now we save to a file if needed
        filename = time.strftime("%Y%m%d-%H%M%S") + '_' + templateFile[:-4] + '.jpg'
        cv2.imwrite(debug_dir + filename, screenshot_with_rectangle)
    result['name'] = templateFile
    return result
def extractAlpha(img, hardedge=True):
    if img.shape[2] > 3:
        logging.debug('Mask detected')
        channels = cv2.split(img)
        mask = np.array(channels[3])
        if hardedge:
            #Threshold the whole alpha channel to a hard 0/255 mask
            mask[mask <= 128] = 0
            mask[mask > 128] = 255
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
        return {'res': True, 'image': img, 'mask': mask}
    else:
        return {'res': False, 'image': img}
def findPicture(screenshot, template, tolerance, multiple=False):
    #This function works with color images, 3 channels minimum
    #The template can have an alpha channel; we extract it to build the mask
    logging.debug('Looking for %s', template)
    logging.debug('Tolerance to check is %f', tolerance)
    logging.debug('*************Start of findPicture')
    h = template.shape[0]
    w = template.shape[1]
    #We will now extract the alpha channel
    tmpl = extractAlpha(template)
    logging.debug('Image width: %d - Image height: %d', w, h)
    # the method used for comparison, one of ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR', 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
    # (only cv2.TM_SQDIFF and cv2.TM_CCORR_NORMED support a mask)
    meth = 'cv2.TM_CCORR_NORMED'
    method = eval(meth)
    # Apply template matching
    if tmpl['res']:
        res = cv2.matchTemplate(screenshot, tmpl['image'], method, mask=tmpl['mask'])
    else:
        res = cv2.matchTemplate(screenshot, tmpl['image'], method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
        best_val = 1 - min_val
    else:
        top_left = max_loc
        best_val = max_val
    #We need to ensure we found at least one match, otherwise we return False
    if best_val >= tolerance:
        if multiple:
            #We need to find every place the image appears
            all_matches = getMulti(res, float(tolerance), int(w), int(h))
        else:
            bottom_right = (top_left[0] + w, top_left[1] + h)
            center = (top_left[0] + (w/2), top_left[1] + (h/2))
            all_matches = [{'top_left': top_left, 'bottom_right': bottom_right, 'center': center, 'tolerance': best_val}]
        #points will be in the form: [{'tolerance': 0.9889718890190125, 'center': (470, 193), 'bottom_right': (597, 215), 'top_left': (343, 172)}]
        logging.debug('The points found will be:')
        logging.debug(all_matches)
        logging.debug('*************End of findPicture')
        return {'res': True, 'points': all_matches}
    else:
        logging.debug('Could not find a value above tolerance')
        logging.debug('*************End of findPicture')
        return {'res': False, 'best_val': best_val}
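For completeness, a small hypothetical driver for these functions could look like the sketch below. The file names screenshot.png and button.png and the 0.95 tolerance are made up for illustration, and test.cfg is assumed to contain the [general] basepics_dir and debug_dir entries read by checkPicture.
# Hypothetical usage sketch for the functions above (file names are examples only).
logging.basicConfig(level=logging.DEBUG)
screenshot = cv2.imread('screenshot.png', cv2.IMREAD_COLOR)
found = checkPicture(screenshot, 'button.png', 0.95, multiple=True)
if found['res']:
    for pt in found['points']:
        print 'Match at center %s with score %.3f' % (pt['center'], pt['tolerance'])
else:
    print 'Best score was only %.3f' % found['best_val']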
In OpenCV 4.2.0 the first two suggested code samples produce the following error for me:
cv2.error: OpenCV(4.2.0) C:\projects\opencv-python\opencv\modules\imgproc\src\templmatch.cpp:766: error: (-215:Assertion failed) (depth == CV_8U || depth == CV_32F) && type == _templ.type() && _img.dims() <= 2 in function 'cv::matchTemplateMask'
It looks like things have become much easier in the meantime. Here is my Python code, which I have reduced as much as possible. The file "crowncap_85x85_mask.png" is a black-and-white image; all black pixels in the mask are ignored during matching.
Still, only the matching methods TM_SQDIFF and TM_CCORR_NORMED are supported when using a mask.
import cv2 as cv
img = cv.imread("fridge_zoomed.png", cv.IMREAD_COLOR)
templ = cv.imread("crowncap_85x85.png", cv.IMREAD_COLOR)
mask = cv.imread( "crowncap_85x85_mask.png", cv.IMREAD_COLOR )
result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED, None, mask)
cv.imshow("Matching with mask", result)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Highest correlation WITH mask', max_val)
result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED)
cv.imshow("Matching without mask", result)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Highest correlation without mask', max_val)
while True:
    if cv.waitKey(10) == 27:
        break
cv.destroyAllWindows()
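If you also want to mark where the best masked match sits in the source image, a short sketch along these lines should work; it re-runs the masked matching so it does not depend on which result is currently stored in result (with TM_CCORR_NORMED the best match is at max_loc):
# Sketch only: draw the best masked-match location on a copy of the source image.
result_masked = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED, None, mask)
_, _, _, max_loc = cv.minMaxLoc(result_masked)
h, w = templ.shape[:2]
bottom_right = (max_loc[0] + w, max_loc[1] + h)
img_marked = img.copy()
cv.rectangle(img_marked, max_loc, bottom_right, (0, 0, 255), 2)
cv.imshow("Best match with mask", img_marked)
cv.waitKey(0)
cv.destroyAllWindows()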
If you want to generate the mask from the alpha channel of your template, you can proceed as follows:
import cv2 as cv
import numpy as np
img = cv.imread("fridge_zoomed.png", cv.IMREAD_COLOR)
templ = cv.imread("crowncap_85x85_transp.png", cv.IMREAD_COLOR)
templ_incl_alpha_ch = cv.imread("crowncap_85x85_transp.png", cv.IMREAD_UNCHANGED)
channels = cv.split(templ_incl_alpha_ch)
#extract "transparency" channel from image
alpha_channel = np.array(channels[3])
#generate mask image, all black dots will be ignored during matching
mask = cv.merge([alpha_channel,alpha_channel,alpha_channel])
cv.imshow("Mask", mask)
result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED, None, mask)
cv.imshow("Matching with mask", result)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Highest correlation WITH mask', max_val)
result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED)
cv.imshow("Matching without mask", result)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Highest correlation without mask', max_val)
while True:
    if cv.waitKey(10) == 27:
        break
cv.destroyAllWindows()
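Note that this variant passes the raw alpha values as the mask, so partially transparent pixels act as partial weights in the correlation. If you would rather have a strict keep/ignore mask, something like this sketch (using the alpha_channel from above) should do it:
# Sketch: threshold the alpha channel into a hard 0/255 mask before merging,
# so each template pixel is either fully used or fully ignored during matching.
hard_alpha = np.where(alpha_channel > 0, 255, 0).astype(np.uint8)
hard_mask = cv.merge([hard_alpha, hard_alpha, hard_alpha])
result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED, None, hard_mask)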