Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 23 additions & 6 deletions create_patches_fp.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,12 +37,12 @@ def patching(WSI_object, **kwargs):
start_time = time.time()

# Patch
file_path = WSI_object.process_contours(**kwargs)
file_path, attr_dict = WSI_object.process_contours(**kwargs)


### Stop Patch Timer
patch_time_elapsed = time.time() - start_time
return file_path, patch_time_elapsed
return file_path, patch_time_elapsed, attr_dict


def seg_and_patch(source, save_dir, patch_save_dir, mask_save_dir, stitch_save_dir,
Expand All @@ -52,7 +52,8 @@ def seg_and_patch(source, save_dir, patch_save_dir, mask_save_dir, stitch_save_d
filter_params = {'a_t':100, 'a_h': 16, 'max_n_holes':8},
vis_params = {'vis_level': -1, 'line_thickness': 500},
patch_params = {'use_padding': True, 'contour_fn': 'four_pt'},
patch_level = 0,
magnification = None,
patch_level = 0, custom_downsample = 1,
use_default_params = False,
seg = False, save_mask = True,
stitch= False,
Expand Down Expand Up @@ -195,9 +196,23 @@ def seg_and_patch(source, save_dir, patch_save_dir, mask_save_dir, stitch_save_d

patch_time_elapsed = -1 # Default time
if patch:
current_patch_params.update({'patch_level': patch_level, 'patch_size': patch_size, 'step_size': step_size,
if magnification:
if WSI_object.max_objective_magnification is None:
df.loc[idx, 'status'] = 'WSI do not has the attribute of objective magnification.'
print('WSI do not has the attribute of objective magnification.')
continue
patch_level, custom_downsample = WSI_object.getPatchLevel(magnification)
if isinstance(patch_level, dict):
df.loc[idx, 'status'] = patch_level['error']
print(patch_level['error'])
continue
current_patch_params.update({'patch_level': patch_level, 'patch_size': patch_size,
'step_size': step_size, 'custom_downsample': custom_downsample,
'save_path': patch_save_dir})
file_path, patch_time_elapsed = patching(WSI_object = WSI_object, **current_patch_params,)
file_path, patch_time_elapsed, attr_dict = patching(WSI_object = WSI_object, **current_patch_params)
print(attr_dict)
for key in ['patch_level', 'target_patch_size', 'target_step_size','patch_size', 'step_size', 'custom_downsample']:
df.loc[i, key] = attr_dict[key]

stitch_time_elapsed = -1
if stitch:
Expand Down Expand Up @@ -242,6 +257,8 @@ def seg_and_patch(source, save_dir, patch_save_dir, mask_save_dir, stitch_save_d
help='directory to save processed data')
parser.add_argument('--preset', default=None, type=str,
help='predefined profile of default segmentation and filter parameters (.csv)')
parser.add_argument('--magnification', type=int, default=None,
help='objective magnification at which to patch. When it is not None, the patch_level does not work.')
parser.add_argument('--patch_level', type=int, default=0,
help='downsample level at which to patch')
parser.add_argument('--process_list', type = str, default=None,
Expand Down Expand Up @@ -306,6 +323,6 @@ def seg_and_patch(source, save_dir, patch_save_dir, mask_save_dir, stitch_save_d
seg_times, patch_times = seg_and_patch(**directories, **parameters,
patch_size = args.patch_size, step_size=args.step_size,
seg = args.seg, use_default_params=False, save_mask = True,
stitch= args.stitch,
stitch= args.stitch, magnification = args.magnification,
patch_level=args.patch_level, patch = args.patch,
process_list = process_list, auto_skip=args.no_auto_skip)
5 changes: 4 additions & 1 deletion dataset_modules/dataset_h5.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,8 @@ def __init__(self,
dset = f['coords']
self.patch_level = f['coords'].attrs['patch_level']
self.patch_size = f['coords'].attrs['patch_size']
self.target_patch_size = f['coords'].attrs['target_patch_size']
self.custom_downsample = f['coords'].attrs['custom_downsample']
self.length = len(dset)

self.summary()
Expand All @@ -84,7 +86,8 @@ def __getitem__(self, idx):
with h5py.File(self.file_path,'r') as hdf5_file:
coord = hdf5_file['coords'][idx]
img = self.wsi.read_region(coord, self.patch_level, (self.patch_size, self.patch_size)).convert('RGB')

if self.custom_downsample > 1:
img = img.resize((self.target_patch_size, self.target_patch_size))
img = self.roi_transforms(img)
return {'img': img, 'coord': coord}

Expand Down
49 changes: 47 additions & 2 deletions wsi_core/WholeSlideImage.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,13 +32,50 @@ def __init__(self, path):
self.wsi = openslide.open_slide(path)
self.level_downsamples = self._assertLevelDownsamples()
self.level_dim = self.wsi.level_dimensions
self.max_objective_magnification = self.getMaxObjectivteMagnification()

self.contours_tissue = None
self.contours_tumor = None
self.hdf5_file = None

def getOpenSlide(self):
    """Return the underlying ``openslide`` slide handle wrapped by this object."""
    return self.wsi

def getMaxObjectivteMagnification(self):
    """Best-effort lookup of the slide's maximum objective magnification.

    (The method name keeps the original 'Objectivte' spelling because
    callers elsewhere in the file depend on it.)

    Resolution order:
      1. ``aperio.AppMag`` — the vendor-reported objective power.
      2. ``openslide.mpp-x`` — microns per pixel at level 0; ~0.25 um/px
         maps to 40x, ~0.5 um/px maps to 20x.
      3. ``tiff.XResolution`` — converted to microns per pixel
         (assumes the resolution unit is centimeter, as the /10000
         conversion implies — TODO confirm against the slide set).

    Returns:
        int | None: 40 or 20 (or the vendor-reported power), or None when
        no usable property is present or the mpp value is out of range.
    """
    props = self.wsi.properties
    if 'aperio.AppMag' in props:
        # AppMag may be stored as '40' or '40.0'; int() alone raises
        # ValueError on the latter, so parse through float first.
        return int(float(props['aperio.AppMag']))
    if 'openslide.mpp-x' in props:
        mpp = float(props['openslide.mpp-x'])
    elif 'tiff.XResolution' in props:
        # pixels-per-centimeter -> microns per pixel
        mpp = 1.0 / (float(props['tiff.XResolution']) / 10000)
    else:
        return None
    if abs(mpp - 0.25) < 0.1:
        return 40
    if abs(mpp - 0.5) < 0.1:
        return 20
    return None

def getPatchLevel(self, magnification):
    """Map a requested objective magnification to a pyramid level.

    Args:
        magnification (int): desired objective magnification; must not
            exceed — and must evenly divide — the slide's maximum
            objective magnification.

    Returns:
        tuple: ``(patch_level, custom_downsample)`` on success, where
        ``custom_downsample`` is 1 when a pyramid level matches the
        requested scale exactly, or the residual integer downsample to
        apply on top of level 0 otherwise. On failure returns
        ``({'error': message}, None)``.
    """
    if magnification > self.max_objective_magnification:
        return {'error': 'The specified magnification exceeds the maximum available magnification.'}, None
    if self.max_objective_magnification % magnification != 0:
        # Fixed message: the original stated the divisibility backwards.
        return {'error': 'The maximum available magnification must be divisible by the specified magnification.'}, None
    # Exact integer after the divisibility check above; keeps
    # custom_downsample an int instead of a float.
    scale = self.max_objective_magnification // magnification
    patch_level, custom_downsample = None, None
    for level, level_downsample in enumerate(self.level_downsamples):
        downsample = int(level_downsample[0])
        if downsample == scale:
            patch_level = level
            custom_downsample = 1
        elif downsample > scale:
            # level_downsamples is ascending; no exact match exists.
            break
    # Bug fix: the original used `if not patch_level:`, which treats a
    # legitimate match at level 0 (patch_level == 0) as "not found".
    if patch_level is None:
        patch_level = 0
        custom_downsample = scale
    return patch_level, custom_downsample

def initXML(self, xml_path):
def _createContour(coord_list):
Expand Down Expand Up @@ -368,7 +405,9 @@ def _assertLevelDownsamples(self):

return level_downsamples

def process_contours(self, save_path, patch_level=0, patch_size=256, step_size=256, **kwargs):
def process_contours(self, save_path, patch_level=0, patch_size=256, step_size=256, custom_downsample=1, **kwargs):
target_patch_size, target_step_size = patch_size, step_size
patch_size, step_size = int(target_patch_size * custom_downsample), int(target_step_size * custom_downsample)
save_path_hdf5 = os.path.join(save_path, str(self.name) + '.h5')
print("Creating patches for: ", self.name, "...",)
elapsed = time.time()
Expand All @@ -381,14 +420,19 @@ def process_contours(self, save_path, patch_level=0, patch_size=256, step_size=2
print('Processing contour {}/{}'.format(idx, n_contours))

asset_dict, attr_dict = self.process_contour(cont, self.holes_tissue[idx], patch_level, save_path, patch_size, step_size, **kwargs)
attr_dict['coords'].update({
'target_patch_size': target_patch_size,
'target_step_size': target_step_size,
'custom_downsample': custom_downsample
})
if len(asset_dict) > 0:
if init:
save_hdf5(save_path_hdf5, asset_dict, attr_dict, mode='w')
init = False
else:
save_hdf5(save_path_hdf5, asset_dict, mode='a')

return self.hdf5_file
return self.hdf5_file, attr_dict['coords']


def process_contour(self, cont, contour_holes, patch_level, save_path, patch_size = 256, step_size = 256,
Expand Down Expand Up @@ -464,6 +508,7 @@ def process_contour(self, cont, contour_holes, patch_level, save_path, patch_siz
asset_dict = {'coords' : results}

attr = {'patch_size' : patch_size, # To be considered...
'step_size' : step_size,
'patch_level' : patch_level,
'downsample': self.level_downsamples[patch_level],
'downsampled_level_dim' : tuple(np.array(self.level_dim[patch_level])),
Expand Down