python - SciPy optimize: specify parameter step size, false positive convergence
I'm using SciPy optimize in a biomedical imaging application. I'm trying to write an algorithm that aligns 2 images by finding the affine transform (shifting, scaling, and rotation, in this case) between the two, using the optimizer to minimize the difference between the 2 images. The parameters I'm trying to optimize are the x and y shifts, the rotation about the center of the image, and the scaling in the x and y dimensions. Ideally, the step sizes would be close to whole numbers rather than on the order of 1e-6 (the current step size). Is there a way to specify minimum step sizes for the parameters in scipy.optimize?
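For reference, this is a minimal sketch of the only step-size-related knobs I have found so far (the objective and starting point here are placeholders, not my real code): L-BFGS-B accepts an eps option that sets the finite-difference step used to estimate the gradient, and newer SciPy versions appear to accept an initial_simplex option for Nelder-Mead, which effectively sets the initial step per parameter, but neither seems to be a true minimum step size:

import numpy
from scipy import optimize

def placeholder_objective(p):
    # stand-in objective; my real objective compares the two images
    return (p[0] - 10.0)**2 + (p[1] - 20.0)**2

x0 = numpy.array([0.0, 0.0])

# L-BFGS-B: 'eps' is the step used for the finite-difference gradient,
# so setting it to ~1 pixel at least stops the probing at 1e-6 scales
res_lbfgsb = optimize.minimize(placeholder_objective, x0, method='L-BFGS-B',
                               options={'eps': 1.0})

# Nelder-Mead: 'initial_simplex' (in newer SciPy) sets the starting vertex
# spacing, i.e. the initial step for each parameter, here 1 pixel in x and y
simplex = numpy.array([[0.0, 0.0],
                       [1.0, 0.0],
                       [0.0, 1.0]])
res_nm = optimize.minimize(placeholder_objective, x0, method='Nelder-Mead',
                           options={'initial_simplex': simplex, 'xatol': 0.5})

print(res_lbfgsb.x, res_nm.x)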
I'm currently mapping the parameters to the proper order of magnitude, which works for a parameter or 2 at a time with the Nelder-Mead method. (I'm aiming to use L-BFGS-B because of the gradient, but that's a whole other set of problems at the moment.) Adding more parameters causes the optimization to decide that a misaligned image is the correct answer. Is this kind of "false positive" result typical of scipy.optimize? Is there a way to force more accuracy, aside from changing the tolerance?
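For concreteness, the mapping I mean looks roughly like the sketch below (the scale factors are examples, not values I've settled on): a wrapper converts the optimizer's tiny unit-scale variables into whole-pixel shifts, whole-degree rotations, and 0.1-step scale factors before the real objective is evaluated:

import numpy

# example per-parameter scale factors: optimizer space -> physical units
# (x shift, y shift, rotation, x scale, y scale)
SCALES = numpy.array([10000.0, 10000.0, 10000.0, 1000.0, 1000.0])

def scaled_objective(unit_params, real_objective):
    # convert optimizer-space values into pixels / degrees / scale factors,
    # rounding so the real objective only ever sees "whole number" steps
    p = numpy.asarray(unit_params) * SCALES
    x = int(round(p[0]))
    y = int(round(p[1]))
    r = int(round(p[2]))
    sx = 1.0 + abs(round(p[3], 1))
    sy = 1.0 + abs(round(p[4], 1))
    return real_objective([x, y, r, sx, sy])

# usage (assuming the test object from the code below):
#   optimize.minimize(lambda u: scaled_objective(u, test.objective),
#                     numpy.zeros(5), method='nelder-mead', tol=1e-8)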
Working code:
import cv2
import numpy
from scipy import optimize


class toy():
    def __init__(self, img1, img2):
        self.original = img1
        self.shifted = img2

    def find_affine(self, parameters):
        # extract parameters
        x = 0
        y = 0
        r = 0.0
        sx = 1.0
        sy = 1.0
        # x = int(parameters[0]*10000)
        # y = int(parameters[1]*10000)
        # r = int(parameters[2]*10000)
        # sx = 1.0 + abs(round(parameters[3]*1000, 1))
        # sy = 1.0 + abs(round(parameters[4]*1000, 1))
        print "updated parameters: [" + str(x) + ", " + str(y) + ", " + str(r) + ", " + str(sx) + ", " + str(sy) + "]"

        # image data
        original_dims = [len(self.original), len(self.original[0])]
        shifted_dims = [len(self.shifted), len(self.shifted[0])]
        mod_orig = self.original

        # set rotation
        if r != 0.0:
            rotation = cv2.getRotationMatrix2D((original_dims[0]/2, original_dims[1]/2), r, 1)
            mod_orig = cv2.warpAffine(mod_orig, rotation, (len(self.original), len(self.original[0])))

        # set scaling
        if sx != 1.0 or sy != 1.0:
            dims = [int(round(sx*original_dims[0])), int(round(sy*original_dims[1]))]
            mod_orig = cv2.resize(mod_orig, (dims[0], dims[1]), interpolation=cv2.INTER_CUBIC)
            original_dims = [len(mod_orig), len(mod_orig[0])]
            print "new dimensions: " + str(len(mod_orig)) + " " + str(len(mod_orig[0]))

        # set translation
        shift = [int(round(y*sy)), int(round(x*sx)), 0]  # does this need to be scaled?

        # convert to pixels
        # set overlap bounds: x
        if (x <= 0):
            rows_orig = [abs(shift[1]), original_dims[1]]
            rows_shift = [0, shifted_dims[1]+shift[1]]
        else:
            rows_shift = [shift[1], shifted_dims[1]]
            rows_orig = [0, original_dims[1]-shift[1]]

        # set overlap bounds: y
        if (y <= 0):
            cols_orig = [abs(shift[0]), original_dims[0]]
            cols_shift = [0, shifted_dims[0]+shift[0]]
        else:
            cols_shift = [shift[0], shifted_dims[0]]
            cols_orig = [0, original_dims[0]-shift[0]]

        # get the relevant (overlapping) pixels
        original_overlap = mod_orig[cols_orig[0]:cols_orig[1], rows_orig[0]:rows_orig[1]]
        shifted_overlap = self.shifted[cols_shift[0]:cols_shift[1], rows_shift[0]:rows_shift[1]]

        self.show_overlap(original_overlap, shifted_overlap)
        return (original_overlap, shifted_overlap)

    def objective(self, parameters):
        stable_pts, warped_pts = self.find_affine(parameters)
        stable_pts = stable_pts.flatten()
        warped_pts = warped_pts.flatten()
        n = max(len(stable_pts), len(warped_pts))
        print "number of points: " + str(n)
        if n == 0:
            return 99999999999
        score = 0.0
        for s_pt, w_pt in zip(stable_pts, warped_pts):
            score = score + ((float(s_pt) - float(w_pt))**2)
        score = score/n
        print "objective score: " + str(score) + " x = " + str(parameters)
        return score

    def show_overlap(self, img1, img2):
        print "dims"
        print "  img1 dims: " + str(len(img1)) + " " + str(len(img1[0]))
        print "  img2 dims: " + str(len(img2)) + " " + str(len(img2[0]))
        dims = [min(len(img1), len(img2)), min(len(img1[0]), len(img2[0]))]
        overlap = numpy.zeros(dims)
        for x in xrange(len(overlap)):
            for y in xrange(len(overlap[0])):
                overlap[x][y] = abs(int(img1[x][y])-int(img2[x][y]))
        overlap = numpy.uint8(overlap)
        cv2.imshow("overlapping image", overlap)
        cv2.waitKey(500)
        cv2.destroyAllWindows()


def _main(status):
    # read in images
    img1 = cv2.imread("data/smile_orig.png")
    img2 = cv2.imread("data/smiletest.png")

    # convert to grayscale
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    test = toy(img1, img2)
    initial_conditions = [0.0]
    transform = optimize.minimize(test.objective, initial_conditions, method='nelder-mead', tol=1e-8)

    # show the minimization at the end
    params = transform.x
    original_img, shifted_img = test.find_affine(params)
    print "completed minimization"
    print transform

    # show the difference between the transformed image and the desired image
    overlap = numpy.zeros([min(len(original_img), len(shifted_img)), min(len(original_img[0]), len(shifted_img[0]))])
    print "shape of overlap: " + str(overlap.shape)
    for x in xrange(len(overlap)):
        for y in xrange(len(overlap[0])):
            overlap[x][y] = abs(int(original_img[x][y])-int(shifted_img[x][y]))
    overlap = numpy.uint8(overlap)
    cv2.imshow("overlapping image", overlap)
    print "final image shown"
    print "dimensions: " + str(len(overlap)) + " " + str(len(overlap[0]))
    cv2.waitKey(0)
    cv2.destroyAllWindows()


if __name__ == "__main__":
    _main(0)
Images used:
Results:
completed minimization
  status: 0
    nfev: 95
 success: True
     fun: 11.051886006332982
       x: array([ -1.29629630e-04,   8.42592593e-04,   2.77777778e-05])
 message: 'Optimization terminated successfully.'
     nit: 29
With the parameter mapping, the transform found achieves an x-dimension shift of -1, a y-dimension shift of 8, and a rotation of 0. The actual affine transform between the 2 images is an x-dimension shift of 10, a y-dimension shift of 20, and a rotation of -10.
Difference between the 2 images after applying the transform that was found:
It's close, and it has a low score, but it needs to be closer (ideally a perfect score of 0.0, or at least nothing left of the smiley face). Any suggestions are welcome.
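One diagnostic I can run at the end of _main (a sketch, assuming the x/y/r mapping lines in find_affine are uncommented, as in the run that produced the results above, and that the known transform really is x=10, y=20, r=-10) is to evaluate the objective at the ground truth and at the optimizer's answer:

# evaluate the objective at the known ground truth and at the optimizer's
# result, both expressed in optimizer units (i.e. divided by the 10000 mapping)
ground_truth = [10 / 10000.0, 20 / 10000.0, -10 / 10000.0]
found = [-1.29629630e-04, 8.42592593e-04, 2.77777778e-05]

print("score at ground truth: " + str(test.objective(ground_truth)))
print("score at optimizer result: " + str(test.objective(found)))
# if the ground-truth score is not clearly the lower of the two, the objective
# itself (the overlap cropping/scaling) is rewarding the wrong alignment, and
# no optimizer settings will fix that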