Skip to content

Commit ff1ece3

Browse files
authored
Merge pull request #4 from rymuelle/develop
Added functionality to store the original white balance, rotations, and flips
2 parents 1a229c4 + 0cb4cc5 commit ff1ece3

6 files changed

Lines changed: 88 additions & 26 deletions

File tree

README.md

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ pip install .
5858

5959
---
6060

61-
## 📘 Example
61+
## Example
6262

6363
A simple demo notebook is available:
6464

@@ -82,6 +82,4 @@ Special thanks to the authors of **RawNIND**:
8282

8383
> Brummer, Benoit; De Vleeschouwer, Christophe, 2025.
8484
> *Raw Natural Image Noise Dataset.*
85-
> [https://doi.org/10.14428/DVN/DEQCIM](https://doi.org/10.14428/DVN/DEQCIM), Open Data @ UCLouvain, V1.
86-
87-
\
85+
> [https://doi.org/10.14428/DVN/DEQCIM](https://doi.org/10.14428/DVN/DEQCIM), Open Data @ UCLouvain, V1.

examples/dataset_for_noisy_images.ipynb

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
"cells": [
33
{
44
"cell_type": "code",
5-
"execution_count": null,
5+
"execution_count": 1,
66
"id": "405d14a3",
77
"metadata": {},
88
"outputs": [],
@@ -15,7 +15,7 @@
1515
},
1616
{
1717
"cell_type": "code",
18-
"execution_count": null,
18+
"execution_count": 2,
1919
"id": "2bee5068",
2020
"metadata": {},
2121
"outputs": [],
@@ -37,10 +37,23 @@
3737
},
3838
{
3939
"cell_type": "code",
40-
"execution_count": null,
40+
"execution_count": 3,
4141
"id": "49b48e2c",
4242
"metadata": {},
43-
"outputs": [],
43+
"outputs": [
44+
{
45+
"ename": "AttributeError",
46+
"evalue": "'BaseRawHandler' object has no attribute 'input_handler'",
47+
"output_type": "error",
48+
"traceback": [
49+
"\u001b[31m---------------------------------------------------------------------------\u001b[39m",
50+
"\u001b[31mAttributeError\u001b[39m Traceback (most recent call last)",
51+
"\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[3]\u001b[39m\u001b[32m, line 8\u001b[39m\n\u001b[32m 5\u001b[39m high_iso_rh = RawHandler(\u001b[33m\"\u001b[39m\u001b[33mgtBark_12800.arw\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m 6\u001b[39m dims = (\u001b[32m1500\u001b[39m, \u001b[32m1500\u001b[39m + \u001b[32m200\u001b[39m, \u001b[32m4500\u001b[39m, \u001b[32m4500\u001b[39m + \u001b[32m200\u001b[39m)\n\u001b[32m----> \u001b[39m\u001b[32m8\u001b[39m offset = \u001b[43malign_images\u001b[49m\u001b[43m(\u001b[49m\u001b[43mhigh_iso_rh\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlow_iso_rh\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdims\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moffset\u001b[49m\u001b[43m=\u001b[49m\u001b[43m(\u001b[49m\u001b[32;43m0\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[32;43m0\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[32;43m0\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[32;43m0\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 9\u001b[39m offset\n",
52+
"\u001b[36mFile \u001b[39m\u001b[32m~/Develop/RawHandler/src/RawHandler/utils.py:40\u001b[39m, in \u001b[36malign_images\u001b[39m\u001b[34m(rh1, rh2, dims, offset, max_iters, step_sizes)\u001b[39m\n\u001b[32m 36\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34malign_images\u001b[39m(\n\u001b[32m 37\u001b[39m rh1, rh2, dims, offset=(\u001b[32m0\u001b[39m, \u001b[32m0\u001b[39m, \u001b[32m0\u001b[39m, \u001b[32m0\u001b[39m), max_iters=\u001b[32m100\u001b[39m, step_sizes=[\u001b[32m16\u001b[39m, \u001b[32m8\u001b[39m, \u001b[32m4\u001b[39m, \u001b[32m2\u001b[39m]\n\u001b[32m 38\u001b[39m ):\n\u001b[32m 39\u001b[39m offset = np.array(offset)\n\u001b[32m---> \u001b[39m\u001b[32m40\u001b[39m bayer1 = \u001b[43mrh1\u001b[49m\u001b[43m.\u001b[49m\u001b[43minput_handler\u001b[49m(dims=dims)\n\u001b[32m 41\u001b[39m img_shape = rh1.raw.shape[-\u001b[32m2\u001b[39m:]\n\u001b[32m 43\u001b[39m loss = get_loss(bayer1, rh2.input_handler(dims=dims + offset))\n",
53+
"\u001b[31mAttributeError\u001b[39m: 'BaseRawHandler' object has no attribute 'input_handler'"
54+
]
55+
}
56+
],
4457
"source": [
4558
"# A simple code to align image is also provided for use in supervised learning.\n",
4659
"# The first raw handler is the target to be used to align to, the second is aligned.\n",

examples/simple_demosaicing.ipynb

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@
5151
"source": [
5252
"# We can access the underlying bayer data\n",
5353
"# dims = (h1, h2, w1, w2)\n",
54-
"dims = (1500, 1500 + 200, 4500, 4500 + 200)\n",
54+
"dims = (1800, 1800 + 200, 2700, 2700 + 200)\n",
5555
"bayer = rh.raw\n",
5656
"plt.imshow(bayer[dims[0] : dims[1], dims[2] : dims[3]])"
5757
]
@@ -150,15 +150,28 @@
150150
"id": "c2309479",
151151
"metadata": {},
152152
"outputs": [],
153-
"source": []
153+
"source": [
154+
"## We can flip the image (axis=0 for vertical, axis=1 for horizontal)\n",
155+
"\n",
156+
"rh.flip(axis=0)\n",
157+
"rh.flip(axis=1)\n",
158+
"img = rh.generate_thumbnail(clip=True)\n",
159+
"plt.imshow(linear_to_srgb(img))"
160+
]
154161
},
155162
{
156163
"cell_type": "code",
157164
"execution_count": null,
158165
"id": "6aaf94c4",
159166
"metadata": {},
160167
"outputs": [],
161-
"source": []
168+
"source": [
169+
"## We can rotate the image in 90-degree increments. k determines the number of increments.\n",
170+
"rh.rotate(k=1)\n",
171+
"rgb = rh.as_rgb(dims=dims)\n",
172+
"plt.imshow(linear_to_srgb(rgb).transpose(1, 2, 0))\n",
173+
"rgb.min(), rgb.max()"
174+
]
162175
}
163176
],
164177
"metadata": {

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "RawHandler"
3-
version = "0.0.2"
3+
version = "0.0.3"
44
description = "A basic library to handle camera raw files for use in machine learning. Built on rawpy and cv2."
55
authors = [
66
{ name = "Ryan Mueller"},

src/RawHandler/RawDataset.py

Lines changed: 31 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,10 @@
11
import random
22
from torch.utils.data import Dataset
3+
import torch
34

45
from RawHandler.RawHandler import RawHandler
5-
from RawHandler.utils import align_images
6+
7+
import re
68

79

810
class RawDataset(Dataset):
@@ -25,8 +27,8 @@ def __getitem__(self, idx):
2527
# Crop and align
2628
H, W = noisy_rh.raw.shape[-2:]
2729
half_crop = self.crop_size // 2
28-
H_center = random.randint(0 + half_crop, H - half_crop)
29-
W_center = random.randint(0 + half_crop, W - half_crop)
30+
H_center = random.randint(0 + half_crop * 2, H - half_crop * 2)
31+
W_center = random.randint(0 + half_crop * 2, W - half_crop * 2)
3032
crop = (
3133
H_center - half_crop,
3234
H_center + half_crop,
@@ -36,16 +38,33 @@ def __getitem__(self, idx):
3638
if self.offsets is None:
3739
offset = (0, 0, 0, 0)
3840
else:
39-
offset = self.offsets[idx]
40-
offset = align_images(noisy_rh, gt_rh, crop, offset=(0, 0, 0, 0))
41+
offset = self.offsets[idx][0][0]
4142

4243
# Adjust exposure
43-
noisy_rggb = noisy_rh.as_rggb(dims=crop)
44-
noisy_rgb = noisy_rh.as_rgb(dims=crop)
45-
clean_rgb = gt_rh.as_rgb(dims=crop)
44+
gain = (
45+
noisy_rh.adjust_bayer_bw_levels(dims=crop).mean()
46+
/ gt_rh.adjust_bayer_bw_levels(dims=crop).mean()
47+
)
48+
gt_rh.gain = gain
49+
50+
# offset = align_images(noisy_rh, gt_rh, crop, offset=offset, step_sizes=[2])
51+
52+
noisy_rggb = noisy_rh.as_rggb_colorspace(dims=crop, colorspace="AdobeRGB")
53+
noisy_rgb = noisy_rh.as_rgb_colorspace(dims=crop, colorspace="AdobeRGB")
54+
clean_rgb = gt_rh.as_rgb_colorspace(dims=crop + offset, colorspace="AdobeRGB")
55+
56+
iso = re.findall("_ISO([0-9]+)_", noisy_file)
57+
if len(iso) == 1:
58+
iso = int(iso[0])
59+
else:
60+
iso = -100
61+
62+
iso_conditioning = iso / 65535
4663

4764
if self.transform:
48-
noisy_rggb = self.transform(noisy_rggb)
49-
noisy_rgb = self.transform(noisy_rgb)
50-
clean_rgb = self.transform(clean_rgb)
51-
return noisy_rggb, noisy_rgb, clean_rgb, offset
65+
noisy_rggb = self.transform(noisy_rggb.transpose(1, 2, 0))
66+
noisy_rgb = self.transform(noisy_rgb.transpose(1, 2, 0))
67+
clean_rgb = self.transform(clean_rgb.transpose(1, 2, 0))
68+
iso_conditioning = torch.tensor([iso_conditioning])
69+
70+
return noisy_rggb, noisy_rgb, clean_rgb, offset, iso_conditioning

src/RawHandler/RawHandler.py

Lines changed: 21 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ class CoreRawMetadata(NamedTuple):
2020
white_level: int
2121
rgb_xyz_matrix: np.ndarray
2222
raw_pattern: np.ndarray
23+
camera_white_balance: np.ndarray
2324
iheight: int
2425
iwidth: int
2526

@@ -57,6 +58,22 @@ def _remove_masked_pixels(self, img: np.ndarray) -> np.ndarray:
5758
"""Removes masked pixels from the image based on core_metadata.iheight and core_metadata.iwidth."""
5859
return img[:, 0 : self.core_metadata.iheight, 0 : self.core_metadata.iwidth]
5960

61+
def flip(self, axis=1):
62+
raw = np.flip(self.raw, axis=axis)
63+
if axis == 1:
64+
self.raw = safe_crop(raw, dx=1, dy=0)
65+
else:
66+
self.raw = safe_crop(raw, dx=0, dy=1)
67+
68+
def rotate(self, k=1):
69+
raw = np.rot90(self.raw, k=k)
70+
if k == 1:
71+
self.raw = safe_crop(raw, dx=0, dy=1)
72+
if k == 2:
73+
self.raw = safe_crop(raw, dx=1, dy=1)
74+
if k == 3:
75+
self.raw = safe_crop(raw, dx=1, dy=0)
76+
6077
def _input_handler(self, dims=None) -> np.ndarray:
6178
"""
6279
Crops bayer array.
@@ -75,7 +92,7 @@ def _input_handler(self, dims=None) -> np.ndarray:
7592
else:
7693
return img
7794

78-
def _adjust_bayer_bw_levels(self, dims=None) -> np.ndarray:
95+
def _adjust_bayer_bw_levels(self, dims=None, clip=False) -> np.ndarray:
7996
"""
8097
Adjusts black and white levels of Bayer data.
8198
"""
@@ -90,7 +107,8 @@ def _adjust_bayer_bw_levels(self, dims=None) -> np.ndarray:
90107
self.core_metadata.white_level
91108
- self.core_metadata.black_level_per_channel[channel]
92109
)
93-
img = np.clip(img, 0, 1)
110+
if clip:
111+
img = np.clip(img, 0, 1)
94112
return img
95113

96114
def _make_bayer_map(self, bayer: np.ndarray) -> np.ndarray:
@@ -240,6 +258,7 @@ def __new__(cls, path: str, **kwargs):
240258
white_level=rawpy_object.white_level,
241259
rgb_xyz_matrix=rawpy_object.rgb_xyz_matrix,
242260
raw_pattern=rawpy_object.raw_pattern,
261+
camera_white_balance=np.array(rawpy_object.camera_whitebalance),
243262
iheight=rawpy_object.sizes.iheight,
244263
iwidth=rawpy_object.sizes.iwidth,
245264
)

0 commit comments

Comments
 (0)