Commit a731c949 authored by Edward Andò's avatar Edward Andò
Browse files

[skip-ci] partial progress in removing reg and ps from scripts

parent 6294cbd5
Pipeline #56311 skipped
......@@ -111,7 +111,7 @@ if mpiRank == boss or not mpi:
assert(im1.shape == im2.shape), "\nim1 and im2 must have the same size! Exiting."
assert(im1.shape == lab1.shape), "\nim1 and lab1 must have the same size! Exiting."
###############################################################
# Analyse labelled volume in state 01 in order to get bounding
# boxes and centres of mass for correlation
......
This diff is collapsed.
......@@ -180,160 +180,7 @@ if args.PHIFILE is not None:
### This vvv is the pixelSearchOnGrid function from grid.py
"""
This function handles grid-based local correlation, offering an initial rough displacement-only guess.
At the moment matching of windows is done with a Normalised-Correlation-Coefficient approach.
Parameters
----------
im1 : 3D numpy array
A 3D image of greylevels defining a reference configuration for the pixel search
im2 : 3D numpy array
A deformed 3D image of greylevels
nodePositions : nPoints*3 numpy array
Array containing Z, Y, X positions of each point in the grid, as returned by ``makeGrid`` for example
defined at im1
halfWindowSize : 3-item list or int
Size of subvolumes to perform the image correlation on, as a data range taken either side of the voxel on which the node is placed.
The subvolume will be 2*halfWindowSize + 1 pixels on each side.
A general recommendation is to make this half the node spacing
searchRange : dictionary
Search range as a dictionary containing 3 keys: 'zRange', 'yRange', and 'xRange',
Each of which contains a list with two items
PhiField : nPoints*4*4 numpy array, optional
Optional field of ``F`` transformation operators defined for each node.
Currently, only the translational components of F will be taken into account.
Default = No displacement
minMaskCoverage : float, optional
Minimum number of pixels in a subvolume for it to be correlated (only considered in the case of im1mask).
Default = 0.5
im1mask : 3D boolean numpy array, optional
A mask for im1 which is true in the zones to correlate.
Default = None
greyThreshold : list of two floats, optional
Threshold for the mean greylevel in each im1 subvolume.
If the mean is below the first value or above the second value, the grid point is not correlated.
Default = [ -inf, inf ]
mpi : bool, optional (default = False)
Are we being called by an MPI run?
Returns
-------
Dictionary containing:
Keys
PhiField : nNodes*4*4 numpy array of floats
For each node, the measured transformation operator (displacement only)
pixelSearchCC : nNodes numpy array of floats
For each node, the NCC score obtained
"""
def getImagettes(nodePosition, Phi, searchRange, boundingBox, im1, im2, im1mask, minMaskCoverage, greyThreshold):
    """
    Extract the pair of subvolumes ("imagettes") needed to pixel-search one grid node.

    Imagette 1 is cut from im1 around `boundingBox` (with the non-translational
    part of `Phi` applied to it, if it is not the identity); imagette 2 is cut
    from im2 around the same box, shifted by the integer translation in `Phi`
    and grown by `searchRange`.

    WARNING: gets easily overwritten downstream, pass a .copy()!

    Parameters
    ----------
    nodePosition : 3-item sequence
        Z, Y, X position of the node (not used in this function's body; kept
        for interface compatibility with callers)
    Phi : 4x4 numpy array
        Transformation operator for this node. The translation column is rounded
        to ints to centre the im2 search window; the rest, if not the identity,
        is applied to imagette 1.
    searchRange : 6-item sequence of ints
        Z- Z+ Y- Y+ X- X+ search extents in pixels
    boundingBox : 6-item numpy array of ints
        zMin, zMax, yMin, yMax, xMin, xMax bounds of the subvolume in im1
    im1, im2 : 3D numpy arrays
        Reference and deformed greylevel images
    im1mask : 3D numpy array or None
        Mask which is nonzero in the zones to correlate; None disables the
        mask-coverage check
    minMaskCoverage : float
        Minimum mean of the boolean mask imagette for the node to be correlated
    greyThreshold : 2-item sequence of floats
        The node is only correlated if low < nanmean(imagette1) < high

    Returns
    -------
    dict with keys:
        'imagette1' : 3D numpy array, or None on failure
        'imagette1mask' : 3D boolean numpy array, or None
        'imagette2' : 3D numpy array, or None on failure
        'returnStatus' : int -- 1 = OK, -5 = failed size/greylevel/mask check,
                         -7 = bad bounding box
        'pixelSearchOffset' : the Z- Y- X- (negative-side) search extents
    """
    returnStatus = 1
    imagette1mask = None
    # Only the integer part of the translation is used to place the im2 window
    initialDisplacement = Phi[0:3, 3].astype(int)

    # Catch bad bounding boxes: every dimension must have a strictly positive
    # extent (the original also checked != 0, which is implied by > 0)
    if numpy.all((boundingBox[1::2] - boundingBox[0::2]) > [0, 0, 0]):
        # 2020-09-25 OS and EA: Prepare startStop array for imagette 1 to be extracted with new slicePadded
        PhiNoDisp = Phi.copy()
        PhiNoDisp[0:3, -1] = 0.0

        # If Phi (minus displacement) is not the identity, pad imagette 1 so
        # there is room to apply the transformation without losing edge data
        phiIsIdentity = numpy.allclose(PhiNoDisp, numpy.eye(4))
        if phiIsIdentity:
            applyPhiPad = 0
        else:
            # 2020-10-06 OS and EA: pad each dimension by half the largest
            # bounding box extent to allow space to apply PhiNoDisp to imagette 1
            applyPhiPad = int(0.5 * numpy.ceil(max(boundingBox[1::2] - boundingBox[0::2])))

        startStopIm1 = [int(boundingBox[0] - applyPhiPad), int(boundingBox[1] + applyPhiPad + 1),
                        int(boundingBox[2] - applyPhiPad), int(boundingBox[3] + applyPhiPad + 1),
                        int(boundingBox[4] - applyPhiPad), int(boundingBox[5] + applyPhiPad + 1)]

        # In either case, extract imagette1, now guaranteed to be the right size
        imagette1padded = spam.helpers.slicePadded(im1, startStopIm1)

        if phiIsIdentity:
            # In this case there is no padding (despite the name) and we can just keep going
            imagette1def = imagette1padded
        else:
            # Apply PhiNoDisp to padded imagette 1, then undo the padding
            imagette1paddedDef = spam.DIC.applyPhi(imagette1padded, PhiNoDisp)
            imagette1def = imagette1paddedDef[applyPhiPad:-applyPhiPad,
                                              applyPhiPad:-applyPhiPad,
                                              applyPhiPad:-applyPhiPad]

        ### Check mask
        if im1mask is None:
            # no mask1 --> always pass this test (e.g., labelled image)
            maskVolumeCondition = True
            imagette1mask = None
        else:
            imagette1mask = spam.helpers.slicePadded(im1mask, boundingBox + numpy.array([0, 1, 0, 1, 0, 1])) != 0
            maskVolumeCondition = imagette1mask.mean() >= minMaskCoverage

        # Make sure imagette is not 0-dimensional in any dimension
        if numpy.all(numpy.array(imagette1def.shape) > 0):
            # Grey threshold: mean must be strictly between low and high
            # (nanmean hoisted so it is computed only once)
            imagette1mean = numpy.nanmean(imagette1def)
            if greyThreshold[0] < imagette1mean < greyThreshold[1]:
                if maskVolumeCondition:
                    # 2020-09-25 OS and EA: Prepare startStop array for imagette 2
                    # to be extracted with new slicePadded: the im1 box offset by
                    # the initial displacement and grown by the search range
                    startStopIm2 = [int(boundingBox[0] + initialDisplacement[0] + searchRange[0]),
                                    int(boundingBox[1] + initialDisplacement[0] + searchRange[1] + 1),
                                    int(boundingBox[2] + initialDisplacement[1] + searchRange[2]),
                                    int(boundingBox[3] + initialDisplacement[1] + searchRange[3] + 1),
                                    int(boundingBox[4] + initialDisplacement[2] + searchRange[4]),
                                    int(boundingBox[5] + initialDisplacement[2] + searchRange[5] + 1)]
                    imagette2 = spam.helpers.slicePadded(im2, startStopIm2)
                else:
                    # Failed minMaskVolume condition
                    returnStatus = -5
                    imagette1def = None
                    imagette2 = None
            else:
                # Failed greylevel condition
                returnStatus = -5
                imagette1def = None
                imagette2 = None
        else:
            # Failed 0-dimensional imagette test
            returnStatus = -5
            imagette1def = None
            imagette2 = None
    else:
        # Failed bounding box test
        returnStatus = -7
        imagette1def = None
        imagette2 = None

    return {'imagette1': imagette1def,
            'imagette1mask': imagette1mask,
            'imagette2': imagette2,
            'returnStatus': returnStatus,
            'pixelSearchOffset': searchRange[0::2]}
def pixelSearchOneNode(nodeNumber):
"""
......@@ -390,7 +237,7 @@ def pixelSearchOneNode(nodeNumber):
imagetteReturns['returnStatus'] = 0
else:
imagetteReturns = getImagettes(nodePositions[nodeNumber], PhiField[nodeNumber].copy(), searchRange.copy(), boundingBoxes[nodeNumber], im1, im2, im1mask, args.MASK_COVERAGE, greyThreshold)
imagetteReturns = spam.DIC.getImagettes(nodePositions[nodeNumber], PhiField[nodeNumber].copy(), searchRange.copy(), boundingBoxes[nodeNumber], im1, im2, im1mask, args.MASK_COVERAGE, greyThreshold)
# If getImagettes was successful (size check and mask coverage check)
if imagetteReturns['returnStatus'] == 1:
......@@ -537,48 +384,3 @@ if args.VTK:
# If you insist on overlapping, then perhaps it's better to save each point as a cube glyph
# and actually *have* overlapping
spam.helpers.writeStructuredVTK(origin=nodePositions[0]-args.HWS, aspectRatio=args.NS, cellData=cellData, fileName=args.OUT_DIR+"/"+args.PREFIX+".vtk")
# tidy up, send message type -1 to all workers
if mpi:
for worker in range(numberOfWorkers ): mpiComm.send( None, dest=worker, tag=5)
print("\n")
#elif mpi: # We are not the mpi boss, so we are a lukasKanade worker
#import spam.DIC.correlate
#while True:
#m = mpiComm.recv(source=boss, tag=mpi4py.MPI.ANY_TAG, status=mpiStatus)
#tag = mpiStatus.Get_tag()
## We've recieved a node to process
#if tag == 1:
#pixelSearchReturns = spam.DIC.correlate.pixelSearch(m['im1'],
#m['im2'],
#imagette1mask=m['im1mask'],
#searchRange=m['searchRange'],
#searchCentre=m['searchCentre'])
## print "\t\tI am worker {} Sending result for node {}".format( mpiRank, node )
#mpiComm.send([mpiRank, m['nodeNumber'], pixelSearchReturns, m['initialDisplacement']], dest=boss, tag=2)
#elif tag == 3:
## print "\n\n\t\tI am worker {} working on node {}".format( mpiRank, m )
#lukasKanadeReturns = spam.DIC.correlate.register(m['im1'],
#m['im2'],
#im1mask=m['im1mask'],
#PhiInit=m['PhiInit'],
#margin=m['margin'],
#maxIterations=m['maxIterations'],
#deltaPhiMin=m['deltaPhiMin'],
#updateGradient=m['updateGradient'],
#interpolationOrder=m['interpolationOrder'],
#interpolator=m['interpolator'])
## print "\t\tI am worker {} Sending result for node {}".format( mpiRank, node )
#mpiComm.send([mpiRank, m['nodeNumber'], lukasKanadeReturns, m['nodeDisplacement']], dest=boss, tag=4)
#elif tag == 5:
## print("\t\tWorker {}: Quitting, bye!".format(mpiRank))
#exit()
#else:
#print("\t\tWorker {}: Don't recognise tag {}".format(mpiRank, tag))
## print("\t\tMessage:", message)
......@@ -42,10 +42,22 @@ class testAll(unittest.TestCase):
try:
rm(testFolder+"snow-ref.tif")
rm(testFolder+"snow-def.tif")
rm(testFolder+"snow-def-onlyDisp.tif")
rm(testFolder+"snow-mask.tif")
rm(testFolder+"snow-def-cgs.tif")
rm(testFolder+"snow-ref-lab-displaced.tif")
rm(testFolder+"snow-ref-lab.tif")
rm(testFolder+"snow-ref-snow-def-ereg-onlyRot.tsv")
rm(testFolder+"snow-ref-snow-def-ereg.tsv")
rm(testFolder+"snow-ref-snow-def-pixelSearch-CC.tif")
rm(testFolder+"snow-ref-snow-def-pixelSearch-returnStatus.tif")
rm(testFolder+"snow-ref-snow-def-pixelSearch-Xdisp.tif")
rm(testFolder+"snow-ref-snow-def-pixelSearch-Ydisp.tif")
rm(testFolder+"snow-ref-snow-def-pixelSearch-Zdisp.tif")
rm(testFolder+"snow-ref-snow-def-PSCC.tif")
rm(testFolder+"snow-ref-snow-def-pixelSearch.tsv")
rm(testFolder+"snow-ref-snow-def-registration.tsv")
#rm(testFolder+"")
rm(testFolder+"snow-ref-snow-def-bin1-registration.tsv")
rm(testFolder+"snow-ref-snow-def-bin2-registration.tsv")
rm(testFolder+"snow-ref-snow-def-discreteDVC.tsv")
......@@ -111,6 +123,11 @@ class testAll(unittest.TestCase):
tifffile.imsave(testFolder + "snow-def.tif", snowDef)
os.chmod(testFolder + "snow-def.tif", 0o666)
snowDefOnlyDisp = spam.DIC.applyPhi(snowRef, Phi=spam.deformation.computePhi({'t': refTranslation}))
# save it locally
tifffile.imsave(testFolder + "snow-def-onlyDisp.tif", snowDefOnlyDisp)
os.chmod(testFolder + "snow-def.tif", 0o666)
# Mask for pixel search
snowMask = numpy.ones_like(snowDef, dtype='<u1')
snowMask[0] = 0
......@@ -126,6 +143,14 @@ class testAll(unittest.TestCase):
'returnStatus': 2,
'deltaPhiNorm': 1})
# generate a fake "eye reg" initial guess which is close
spam.helpers.writeRegistrationTSV(testFolder + "snow-ref-snow-def-ereg-onlyRot.tsv", (numpy.array( snowRef.shape )-1)/2.0,
{'Phi': spam.deformation.computePhi({'r': refRotation}),
'error': 0,
'iterations': 0,
'returnStatus': 2,
'deltaPhiNorm': 1})
#######################################################
### Step 2 check spam-reg functionality
#######################################################
......@@ -134,8 +159,8 @@ class testAll(unittest.TestCase):
self.assertEqual(exitCode, 0)
regResult = spam.helpers.readCorrelationTSV(testFolder + "snow-ref-snow-def-registration.tsv")
transformation = spam.deformation.decomposePhi(regResult['PhiField'][0])
print(transformation['t'])
print(transformation['r'])
#print(transformation['t'])
#print(transformation['r'])
self.assertTrue(numpy.allclose(refTranslation, transformation['t'], atol=0.01))
self.assertTrue(numpy.allclose(refRotation, transformation['r'], atol=0.01))
......@@ -144,16 +169,59 @@ class testAll(unittest.TestCase):
### Step 3 check spam-pixelSearch functionality
#######################################################
### Step 3.1 load initial guess and just search around it...
exitCode = subprocess.call(["spam-pixelSearch", "-pf", testFolder + "snow-ref-snow-def-registration.tsv", "-sr", "-2", "2", "-2", "2", "-2", "2", "-glt", "5000", "-hws", "10", testFolder + "snow-ref.tif", testFolder + "snow-def.tif", "-od", testFolder + "", "-tif", "-mf1", testFolder + "snow-mask.tif", "-mc", "1.0"])
exitCode = subprocess.call(["spam-pixelSearch",
"-pf", testFolder + "snow-ref-snow-def-registration.tsv",
"-sr", "-2", "2", "-2", "2", "-2", "2",
"-glt", "5000", "-hws", "10",
testFolder + "snow-ref.tif", testFolder + "snow-def.tif",
"-od", testFolder + "", "-tif", "-mf1", testFolder + "snow-mask.tif", "-mc", "1.0"])
self.assertEqual(exitCode, 0)
PSresult = spam.helpers.readCorrelationTSV(testFolder + "snow-ref-snow-def-pixelSearch.tsv", readPixelSearchCC=True)
print(PSresult['PhiField'][PSresult['returnStatus']==1,0,-1].mean())
print(PSresult['pixelSearchCC'][PSresult['returnStatus']==1].mean())
#print(PSresult['PhiField'][PSresult['returnStatus']==1,0,-1].mean())
#print(PSresult['pixelSearchCC'][PSresult['returnStatus']==1].mean())
# Assert that the CC is nice and high
self.assertTrue(0.95 < PSresult['pixelSearchCC'][PSresult['returnStatus']==1].mean())
# And the z-displacement is low
self.assertTrue(numpy.isclose(0, PSresult['PhiField'][PSresult['returnStatus']==1,0,-1].mean(), atol=1.0))
### Step 3.2 load initial ONLY ROTATION guess and do a big search around the applied displacement
exitCode = subprocess.call(["spam-pixelSearch", "-pf", testFolder + "snow-ref-snow-def-ereg-onlyRot.tsv",
"-sr",
"{}".format(int(refTranslation[0]-2)), "{}".format(int(refTranslation[0]+2)),
"{}".format(int(refTranslation[1]-2)), "{}".format(int(refTranslation[1]+2)),
"{}".format(int(refTranslation[2]-2)), "{}".format(int(refTranslation[2]+2)),
"-glt", "5000", "-hws", "10",
testFolder + "snow-ref.tif", testFolder + "snow-def.tif",
"-od", testFolder + "", "-tif", "-mf1", testFolder + "snow-mask.tif", "-mc", "1.0"])
self.assertEqual(exitCode, 0)
PSresult = spam.helpers.readCorrelationTSV(testFolder + "snow-ref-snow-def-pixelSearch.tsv", readPixelSearchCC=True)
#print(PSresult['PhiField'][PSresult['returnStatus']==1,0,-1].mean())
#print(PSresult['pixelSearchCC'][PSresult['returnStatus']==1].mean())
# Assert that the CC is nice and high
self.assertTrue(0.95 < PSresult['pixelSearchCC'][PSresult['returnStatus']==1].mean())
# And the z-displacement is low
self.assertTrue(numpy.isclose(0, PSresult['PhiField'][PSresult['returnStatus']==1,0,-1].mean(), atol=1.0))
### Step 3.3 NO ROTATION IMAGE big search around the applied displacement
exitCode = subprocess.call(["spam-pixelSearch",
"-sr",
"{}".format(int(refTranslation[0]-2)), "{}".format(int(refTranslation[0]+2)),
"{}".format(int(refTranslation[1]-2)), "{}".format(int(refTranslation[1]+2)),
"{}".format(int(refTranslation[2]-2)), "{}".format(int(refTranslation[2]+2)),
"-glt", "5000", "-hws", "10",
testFolder + "snow-ref.tif", testFolder + "snow-def-onlyDisp.tif",
"-od", testFolder + "", "-mf1", testFolder + "snow-mask.tif", "-mc", "1.0"])
self.assertEqual(exitCode, 0)
PSresult = spam.helpers.readCorrelationTSV(testFolder + "snow-ref-snow-def-onlyDisp-pixelSearch.tsv", readPixelSearchCC=True)
# Assert that the CC is nice and high
self.assertTrue(0.95 < PSresult['pixelSearchCC'][PSresult['returnStatus']==1].mean())
# And the z-displacement is low
self.assertTrue(numpy.isclose(refTranslation[0], PSresult['PhiField'][PSresult['returnStatus']==1,0,-1].mean(), atol=1.0))
self.assertTrue(numpy.isclose(refTranslation[1], PSresult['PhiField'][PSresult['returnStatus']==1,1,-1].mean(), atol=1.0))
self.assertTrue(numpy.isclose(refTranslation[2], PSresult['PhiField'][PSresult['returnStatus']==1,2,-1].mean(), atol=1.0))
exit()
# Check output results with TSV output, bin1 registration + TIFFs
# Decreasing node spacing in order to have more points for Geers strain calculation
......
This diff is collapsed.
......@@ -34,12 +34,8 @@ GLPv3descriptionHeader = "Copyright (C) 2020 SPAM developers\n"+\
"This program comes with ABSOLUTELY NO WARRANTY.\n"+\
"This is free software, and you are welcome to redistribute it under certain conditions\n\n\n"
def ldicParser(parser):
parser.add_argument('-nompi',
action="store_false",
dest='MPI',
help='Force disactivate MPI? Only unse this if you cannot import mpi4py')
def ldicParser(parser):
parser.add_argument('inFiles',
nargs='+',
type=argparse.FileType('r'),
......@@ -87,68 +83,6 @@ def ldicParser(parser):
dest='GREY_HIGH_THRESH',
help="Grey threshold on mean of reference imagette ABOVE which the correlation is not performed. Default = infinity")
parser.add_argument('-reg',
'--registration',
action="store_true",
dest='REG',
help='Perform an initial registration? Default = False')
parser.add_argument('-regbb',
'--registration-binning-begin',
type=int,
default=4,
dest='REG_BIN_BEGIN',
help='Initial binning to apply to input images for initial registration. Default = 4')
parser.add_argument('-regbe',
'--registration-binning-end',
type=int,
default=1,
dest='REG_BIN_END',
help='Binning level to stop at for initial registration. Default = 1')
parser.add_argument('-regm',
'--registration-margin',
type=float,
default=0.1,
dest='REG_MARGIN',
help='Registration margin in proportions of image size. Default = 0.1, which means 0.1 * image size from both sides')
parser.add_argument('-regs',
'--subtract-registration',
action="store_true",
dest='REGSUB',
help='Subtract rigid part of initial registration from output displacements? Default = False')
parser.add_argument('-regu',
'--registration-update',
action="store_true",
dest='REG_UPDATE',
help='Update gradient in initial registration? More computation time but more robust and possibly fewer iterations. Default = False')
parser.add_argument('-ps',
'--pixel-search',
type=str,
default='auto',
dest='PS',
help="Pixel search option.Accepted values are:\n\t\"auto\": disactivate pixel search if registration works, or Ffile is given." +
"\n\t\"on\": Force a pixel search in any case (rounds initial guess and can lose initial F).\n\t\"off\": block pixel search. \"auto\" is default")
parser.add_argument('-psr',
'--pixel-search-range',
nargs=6,
type=int,
default=[-3, 3, -3, 3, -3, 3],
dest='PSR',
help='Z- Z+ Y- Y+ X- X+ ranges (in pixels) for the pxiel search. Requires pixel search to be activated. Default = +-3px')
parser.add_argument('-psf',
'--pixel-search-filter',
type=int,
default=0,
dest='PS_FILTER',
help='Median filter pixel search results. Default = 0')
# Default: node spacing equal in all three directions
parser.add_argument('-ns',
'--node-spacing',
......@@ -253,46 +187,6 @@ def ldicParser(parser):
dest='SERIES_INCREMENTAL',
help='Perform incremental correlations between images')
parser.add_argument('-cif',
'--correct-input-field',
action="store_true",
dest='CORRECT_FIELD',
help='Activates correction of the input F field')
parser.add_argument('-cni',
'--correct-neighbours-for-field-interpolation',
type=int,
default=12,
dest='CORRECT_NEIGHBOURS',
help="Number of neighbours for field interpolation. Default = 12")
parser.add_argument('-cmf',
'--correct-median-filter',
action="store_true",
dest='CORRECT_MEDIAN_FILTER',
help="Activates an overall median filter on the input F field")
parser.add_argument('-cmfr',
'--correct-median-filter-radius',
type=int,
default=2,
dest='CORRECT_MEDIAN_FILTER_RADIUS',
help="Radius of median filter for correction of input F field. Default = 2")
parser.add_argument('-cdp',
'--correct-delta-phi-norm',
type=numpy.float,
default=0.001,
dest='CORRECT_DELTA_PHI_NORM',
help="Delta F norm for a return status = 1 correlation window to consider the point good. Default = 0.001")
parser.add_argument('-cpscc',
'--correct-pixel-search-cc',
type=numpy.float,
default=0.98,
dest='CORRECT_PIXEL_SEARCH_CC',
help="Pixel search correlation coefficient to consider the point good. Default = 0.98")
parser.add_argument('-od',
'--out-dir',
type=str,
......@@ -367,15 +261,6 @@ def ldicParser(parser):
if not os.path.isdir(args.OUT_DIR):
raise
# Output file name prefix
# 2018-11-15 EA: Setting this in the client in order not to overwrite files in a series
# if args.PREFIX is None:
# args.PREFIX = os.path.splitext(os.path.basename(args.inFiles[0].name))[0]
if args.PS != 'auto' and args.PS != 'on' and args.PS != 'off':
print("\nInvalid option {} for pixel search. Setting \"auto\"".format(args.PS))
args.PS = 'auto'
# Catch interdependent node spacing and correlation window sizes
if args.NS is None:
print("\nUsing default node spacing: "),
......@@ -433,9 +318,6 @@ def ldicParser(parser):
print("#############################################################")
print("#############################################################")
if args.REG_MARGIN > 0.45:
print("Registration margin cannot be bigger than 0.45 since 0.5 would contain no data!!")
if args.SERIES_PHIFILE:
args.TSV = True
......@@ -462,33 +344,6 @@ def ddicParser(parser):
type=argparse.FileType('r'),
help="Greyscale image of deformed state for correlation")
parser.add_argument('-ps',
'--pixel-search',
type=str,
default='auto',
dest='PS',
help="Pixel search option.Accepted values are:\n\t\"auto\": disactivate pixel search if registration works, or Ffile is given." +
"\n\t\"on\": Force a pixel search in any case (rounds initial guess and can lose initial F).\n\t\"off\": block pixel search. \"auto\" is default")
parser.add_argument('-nompi',
action="store_false",
dest='MPI',
help='Disactivate MPI parallelisation?')
parser.add_argument('-psr',
'--pixel-search-range',
nargs=6,
type=int,
default=[-3, 3, -3, 3, -3, 3],
dest='PSR',
help='Z- Z+ Y- Y+ X- X+ ranges (in pixels) for the pxiel search. Requires pixel search to be activated. Default = +-3px')
parser.add_argument('-nolc',
'--no-label-correlation',
action="store_false",
dest='LABEL_CORRELATE',
help='Disactivate label registration?')
parser.add_argument('-ld',
'--label-dilate',
type=int,
......@@ -510,45 +365,6 @@ def ddicParser(parser):
dest='VOLUME_THRESHOLD',
help="Volume threshold below which labels are ignored. Default = 100")
parser.add_argument('-reg',
'--registration',
action="store_true",
dest='REG',
help='Perform an initial registration? Default = False')
parser.add_argument('-regbb',
'--registration-binning-begin',
type=int,
default=4,
dest='REG_BIN_BEGIN',
help='Initial binning to apply to input images for initial registration. Default = 4')
parser.add_argument('-regbe',
'--registration-binning-end',
type=int,
default=1,
dest='REG_BIN_END',
help='Binning level to stop at for initial registration. Default = 1')
parser.add_argument('-regm',
'--registration-margin',
type=float,
default=0.1,
dest='REG_MARGIN',
help='Registration margin in proportions of image size. Default = 0.1, which means 0.1 * image size from both sides')
parser.add_argument('-regs',
'--subtract-registration',
action="store_true",
dest='REGSUB',
help='Subtract rigid part of initial registration from output displacements? Default = False')
parser.add_argument('-regu',
'--registration-update',
action="store_true",
dest='REG_UPDATE',
help='Update gradient in initial registration? More computation time but more robust and possibly fewer iterations. Default = False')
parser.add_argument('-pf',
'-phiFile',
dest='PHIFILE',
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment