BSD68
The BSD68 dataset was adapted from K. Zhang et al. (TIP, 2017) and is composed of natural images to which noise was artificially added, allowing quantitative comparison with the ground truth; it is one of the benchmarks used in many denoising publications. Here, we assess the performance of N2V2, an extension of Noise2Void.
# Imports necessary to execute the code
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import tifffile
from careamics import CAREamist
from careamics.config import create_n2v_configuration
from careamics.utils.metrics import scale_invariant_psnr
from careamics_portfolio import PortfolioManager
from PIL import Image
Import the dataset¶
The dataset can be directly downloaded using the careamics-portfolio package, which uses pooch to download the data.
# instantiate the data portfolio manager
portfolio = PortfolioManager()
# and download the data
root_path = Path("./data")
files = portfolio.denoising.N2V_BSD68.download(root_path)
# create paths for the data
data_path = Path(root_path / "denoising-N2V_BSD68.unzip/BSD68_reproducibility_data")
train_path = data_path / "train"
val_path = data_path / "val"
test_path = data_path / "test" / "images"
gt_path = data_path / "test" / "gt"
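As an optional sanity check (not part of the original workflow), we can verify that the expected folders are present after the download:
# optional sanity check: make sure the expected folders were extracted
for folder in (train_path, val_path, test_path, gt_path):
    assert folder.exists(), f"Missing folder: {folder}"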
Visualize data¶
# load one training and one validation image and show them side by side
single_train_image = tifffile.imread(next(iter(train_path.rglob("*.tiff"))))[0]
single_val_image = tifffile.imread(next(iter(val_path.rglob("*.tiff"))))[0]
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].imshow(single_train_image, cmap="gray")
ax[0].set_title("Training Image")
ax[1].imshow(single_val_image, cmap="gray")
ax[1].set_title("Validation Image")
Train with CAREamics¶
The easiest way to use CAREamics is to create a configuration and a CAREamist.
Create configuration¶
The configuration can be built from scratch, giving the user full control over the various parameters available in CAREamics. However, a straightforward way to create a configuration for a particular algorithm is to use one of the convenience functions.
Here, the switch between Noise2Void and N2V2 is done by changing the use_n2v2 parameter.
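For illustration, the same call with use_n2v2=False would configure vanilla Noise2Void (config_n2v is a hypothetical name and is not used further; the cell below keeps use_n2v2=True to train N2V2):
# for illustration only: identical call configuring vanilla Noise2Void instead of N2V2
config_n2v = create_n2v_configuration(
    experiment_name="bsd68_n2v_vanilla",
    data_type="tiff",
    axes="SYX",
    patch_size=(64, 64),
    batch_size=64,
    num_epochs=100,
    use_n2v2=False,
)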
config = create_n2v_configuration(
experiment_name="bsd68_n2v",
data_type="tiff",
axes="SYX",
patch_size=(64, 64),
batch_size=64,
num_epochs=100,
use_n2v2=True,
)
print(config)
{'algorithm_config': {'algorithm': 'n2v', 'loss': 'n2v', 'lr_scheduler': {'name': 'ReduceLROnPlateau', 'parameters': {}}, 'model': {'architecture': 'UNet', 'conv_dims': 2, 'depth': 2, 'final_activation': 'None', 'in_channels': 1, 'independent_channels': True, 'n2v2': True, 'num_channels_init': 32, 'num_classes': 1}, 'optimizer': {'name': 'Adam', 'parameters': {'lr': 0.0001}}}, 'data_config': {'axes': 'SYX', 'batch_size': 64, 'data_type': 'tiff', 'patch_size': [64, 64], 'transforms': [{'flip_x': True, 'flip_y': True, 'name': 'XYFlip', 'p': 0.5}, {'name': 'XYRandomRotate90', 'p': 0.5}, {'masked_pixel_percentage': 0.2, 'name': 'N2VManipulate', 'roi_size': 11, 'strategy': 'median', 'struct_mask_axis': 'none', 'struct_mask_span': 5}]}, 'experiment_name': 'bsd68_n2v', 'training_config': {'accumulate_grad_batches': 1, 'check_val_every_n_epoch': 1, 'checkpoint_callback': {'auto_insert_metric_name': False, 'mode': 'min', 'monitor': 'val_loss', 'save_last': True, 'save_top_k': 3, 'save_weights_only': False, 'verbose': False}, 'enable_progress_bar': True, 'gradient_clip_algorithm': 'norm', 'max_steps': -1, 'num_epochs': 100, 'precision': '32'}, 'version': '0.1.0'}
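The printed dictionary mirrors the nested structure of the configuration object, so individual values can be read back through attribute access. A minimal sketch, assuming the attribute names match the keys printed above:
# sketch: reading values back from the configuration
# (attribute names assumed to mirror the printed keys)
print(config.experiment_name)
print(config.data_config.patch_size)
print(config.algorithm_config.model.n2v2)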
Train¶
A CAREamist can be created from a configuration alone, and then trained by pointing it to the training and validation folders.
# instantiate a CAREamist
careamist = CAREamist(source=config)
# train
careamist.train(
train_source=train_path,
val_source=val_path,
)
Predict with CAREamics¶
Prediction is done with the same CAREamist used for training.
prediction = careamist.predict(
source=test_path,
axes="YX",
tile_size=(256, 256),
tile_overlap=(48, 48),
)
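Before visualizing, it can help to check what predict returned: it is used below as a list-like collection with one array (with singleton dimensions) per test image, hence the squeeze calls.
# quick inspection of the prediction output (one array per test image)
print(f"Number of predictions: {len(prediction)}")
print(f"Shape of the first prediction: {prediction[0].squeeze().shape}")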
Visualize the prediction¶
# show three randomly chosen test images next to their predictions and ground truth
noises = [tifffile.imread(f) for f in sorted(test_path.glob("*.tiff"))]
gts = [tifffile.imread(f) for f in sorted(gt_path.glob("*.tiff"))]
# images to show
images = np.random.choice(len(noises), 3, replace=False)  # pick three distinct images
fig, ax = plt.subplots(3, 3, figsize=(15, 15))
fig.tight_layout()
for i in range(3):
pred_image = prediction[images[i]].squeeze()
psnr_noisy = scale_invariant_psnr(gts[images[i]], noises[images[i]])
psnr_result = scale_invariant_psnr(gts[images[i]], pred_image)
ax[i, 0].imshow(noises[images[i]], cmap="gray")
ax[i, 0].title.set_text(f"Noisy\nPSNR: {psnr_noisy:.2f}")
ax[i, 1].imshow(pred_image, cmap="gray")
ax[i, 1].title.set_text(f"Prediction\nPSNR: {psnr_result:.2f}")
ax[i, 2].imshow(gts[images[i]], cmap="gray")
ax[i, 2].title.set_text("Ground-truth")
Compute metrics¶
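The metric used here (and in the figure above) is a scale-invariant PSNR, which makes the score insensitive to linear rescaling of the predicted intensities. A minimal numpy sketch of the idea (an illustration of the principle, not necessarily the exact careamics implementation):
# illustrative sketch of a scale-invariant PSNR: the prediction is zero-meaned
# and rescaled by a least-squares factor before computing a standard PSNR
def si_psnr_sketch(gt: np.ndarray, pred: np.ndarray) -> float:
    gt_norm = (gt - gt.mean()) / gt.std()  # normalized ground truth
    pred_zero = pred - pred.mean()  # zero-mean prediction
    scale = np.sum(gt_norm * pred_zero) / np.sum(pred_zero**2)  # least-squares scale
    mse = np.mean((gt_norm - scale * pred_zero) ** 2)
    data_range = gt_norm.max() - gt_norm.min()
    return float(10 * np.log10(data_range**2 / mse))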
psnrs = np.zeros((len(prediction), 1))
for i, (pred, gt) in enumerate(zip(prediction, gts)):
psnrs[i] = scale_invariant_psnr(gt, pred.squeeze())
print(f"PSNR: {psnrs.mean():.2f} +/- {psnrs.std():.2f}")
print("Reported PSNR: 27.71")
PSNR: 26.89 +/- 2.45
Reported PSNR: 27.71
Create cover¶
# create a cover image
im_idx = 3
cv_image_noisy = noises[im_idx]
cv_image_pred = prediction[im_idx].squeeze()
# create image
cover = np.zeros((256, 256))
(height, width) = cv_image_noisy.shape
assert height > 256
assert width > 256
# normalize train and prediction
norm_noise = (cv_image_noisy - cv_image_noisy.min()) / (
cv_image_noisy.max() - cv_image_noisy.min()
)
norm_pred = (cv_image_pred - cv_image_pred.min()) / (
cv_image_pred.max() - cv_image_pred.min()
)
# fill in halves
cover[:, : 256 // 2] = norm_noise[
height // 2 - 256 // 2 : height // 2 + 256 // 2, width // 2 - 256 // 2 : width // 2
]
cover[:, 256 // 2 :] = norm_pred[
height // 2 - 256 // 2 : height // 2 + 256 // 2, width // 2 : width // 2 + 256 // 2
]
# plot the single image
plt.imshow(cover, cmap="gray")
# save the image
im = Image.fromarray(cover * 255)
im = im.convert("L")
im.save("BSD68_N2V2.jpeg")
# Export the model
careamist.export_to_bmz(
path_to_archive="bsd68_n2v2_model.zip",
friendly_model_name="BSD68_N2V2",
input_array=noises[im_idx][np.newaxis, :256, :256],
authors=[{"name": "CAREamics authors", "affiliation": "Human Technopole"}],
)