|
| 1 | +import numpy as np |
| 2 | +import rawpy |
| 3 | +from typing import NamedTuple, Optional |
| 4 | +from RawHandler.utils import sparse_representation_three_channel |
| 5 | +from RawHandler.MetaDataHandler import MetaDataHandler |
| 6 | +from RawHandler.dng_utils import to_dng |
| 7 | +from typing import Literal, Tuple |
| 8 | + |
| 9 | +from RawHandler.utils import ( |
| 10 | + make_colorspace_matrix, |
| 11 | + pixel_unshuffle, |
| 12 | + sparse_representation_and_mask, |
| 13 | +) |
| 14 | + |
| 15 | + |
# Define a NamedTuple for the core metadata required by BaseRawHandler for processing
class CoreRawMetadata(NamedTuple):
    """Immutable bundle of the sensor metadata BaseRawHandlerRawpy needs to
    linearize and color-convert a raw frame."""

    # Per-CFA-channel black offset in raw counts (not used in the processing
    # path visible in this file — presumably consumed elsewhere; verify).
    black_level_per_channel: np.ndarray
    # Sensor saturation value; raw data is divided by this to normalize to [0, 1].
    white_level: int
    # Camera-RGB -> XYZ matrix as reported by rawpy; only the first 3 rows are used.
    rgb_xyz_matrix: np.ndarray
    # CFA mosaic layout; passed to sparse_representation_and_mask.
    raw_pattern: np.ndarray
    # As-shot white-balance multipliers (rawpy's camera_whitebalance).
    camera_white_balance: np.ndarray
    # Processed image height reported by rawpy's sizes.
    iheight: int
    # Processed image width reported by rawpy's sizes.
    iwidth: int
| 25 | + |
| 26 | + |
| 27 | +class BaseRawHandlerRawpy: |
| 28 | + """ |
| 29 | + Base class for handling raw image pixel data. |
| 30 | +
|
| 31 | + Args: |
| 32 | + pixel_array (np.array): A 2D NumPy array representing the raw pixel data. |
| 33 | + core_metadata (CoreRawMetadata): A NamedTuple containing essential metadata for processing. |
| 34 | + full_metadata (Optional[FullRawMetadata]): A class wrapping exiv2 to handle metadata information. |
| 35 | + """ |
| 36 | + |
| 37 | + def __init__( |
| 38 | + self, |
| 39 | + rawpy_object: rawpy.RawPy, |
| 40 | + core_metadata: CoreRawMetadata, |
| 41 | + full_metadata: Optional[dict] = None, |
| 42 | + colorspace: Literal[ |
| 43 | + "camera", "XYZ", "sRGB", "AdobeRGB", "lin_rec2020" |
| 44 | + ] = "lin_rec2020", |
| 45 | + ): |
| 46 | + if not isinstance(core_metadata, CoreRawMetadata): |
| 47 | + raise TypeError("core_metadata must be an instance of CoreRawMetadata.") |
| 48 | + |
| 49 | + self.rawpy_object = rawpy_object |
| 50 | + self.core_metadata = core_metadata |
| 51 | + self.full_metadata = full_metadata if full_metadata is not None else None |
| 52 | + self.colorspace = colorspace |
| 53 | + self.camera_linear = None |
| 54 | + |
| 55 | + def compute_linear(self): |
| 56 | + self.camera_linear = ( |
| 57 | + self.rawpy_object.postprocess( |
| 58 | + user_wb=[1, 1, 1, 1], |
| 59 | + output_color=rawpy.ColorSpace.raw, |
| 60 | + no_auto_bright=True, |
| 61 | + use_camera_wb=False, |
| 62 | + use_auto_wb=False, |
| 63 | + gamma=(1, 1), |
| 64 | + user_flip=0, |
| 65 | + output_bps=16, |
| 66 | + user_black=0, |
| 67 | + no_auto_scale=True, |
| 68 | + ) |
| 69 | + / self.core_metadata.white_level |
| 70 | + ).transpose(2, 0, 1) |
| 71 | + |
| 72 | + # orig_dims = camera_linear.shape |
| 73 | + # rgb_to_xyz = self.core_metadata.rgb_xyz_matrix[:3] |
| 74 | + # camera_linear = (rgb_to_xyz @ camera_linear.reshape(3, -1)).reshape(orig_dims) |
| 75 | + # self.camera_linear = camera_linear |
| 76 | + |
| 77 | + def _input_handler(self, dims=None, safe_crop=0) -> np.ndarray: |
| 78 | + """ |
| 79 | + Crops linear array. |
| 80 | + """ |
| 81 | + if self.camera_linear is None: |
| 82 | + self.compute_linear() |
| 83 | + if dims is not None: |
| 84 | + h1, h2, w1, w2 = dims |
| 85 | + if safe_crop: |
| 86 | + h1, h2, w1, w2 = list( |
| 87 | + map(lambda x: x - x % safe_crop, [h1, h2, w1, w2]) |
| 88 | + ) |
| 89 | + return self.camera_linear[:, h1:h2, w1:w2] |
| 90 | + else: |
| 91 | + return self.camera_linear |
| 92 | + |
| 93 | + def rgb_colorspace_transform(self, colorspace=None, **kwargs) -> np.ndarray: |
| 94 | + """ |
| 95 | + Generates a color space transformation matrix for this image. |
| 96 | + """ |
| 97 | + colorspace = colorspace or self.colorspace |
| 98 | + if colorspace == "camera": |
| 99 | + return np.array( |
| 100 | + [ |
| 101 | + [1.0, 0.0, 0.0], |
| 102 | + [0.0, 1.0, 0.0], |
| 103 | + [0.0, 0.0, 1.0], |
| 104 | + ] |
| 105 | + ) |
| 106 | + rgb_to_xyz = np.linalg.inv(self.core_metadata.rgb_xyz_matrix[:3]) |
| 107 | + if colorspace == "XYZ": |
| 108 | + return rgb_to_xyz |
| 109 | + |
| 110 | + transform = make_colorspace_matrix(rgb_to_xyz, colorspace=colorspace, **kwargs) |
| 111 | + return transform |
| 112 | + |
| 113 | + def apply_colorspace_transform( |
| 114 | + self, |
| 115 | + dims=None, |
| 116 | + safe_crop=0, |
| 117 | + xyz_to_colorspace: np.ndarray = None, |
| 118 | + colorspace=None, |
| 119 | + clip=False, |
| 120 | + ) -> np.ndarray: |
| 121 | + """ |
| 122 | + Converts or returns linear data converted into specified colorspace. |
| 123 | + """ |
| 124 | + camera_linear = self._input_handler(dims=dims, safe_crop=safe_crop) |
| 125 | + rgb_transform = self.rgb_colorspace_transform( |
| 126 | + colorspace=colorspace, xyz_to_colorspace=xyz_to_colorspace |
| 127 | + ) |
| 128 | + orig_dims = camera_linear.shape |
| 129 | + transformed = (rgb_transform @ camera_linear.reshape(3, -1)).reshape(orig_dims) |
| 130 | + if clip: |
| 131 | + transformed = np.clip(transformed, 0, 1) |
| 132 | + return transformed |
| 133 | + |
| 134 | + def compute_mask_and_sparse( |
| 135 | + self, dims=None, safe_crop=0, divide_by_wl=True |
| 136 | + ) -> Tuple[np.ndarray, np.ndarray]: |
| 137 | + sparse, mask = sparse_representation_and_mask( |
| 138 | + self.rawpy_object.raw_image_visible, self.core_metadata.raw_pattern |
| 139 | + ) |
| 140 | + if divide_by_wl: |
| 141 | + sparse = sparse / self.core_metadata.white_level |
| 142 | + if dims is not None: |
| 143 | + h1, h2, w1, w2 = dims |
| 144 | + if safe_crop: |
| 145 | + h1, h2, w1, w2 = list( |
| 146 | + map(lambda x: x - x % safe_crop, [h1, h2, w1, w2]) |
| 147 | + ) |
| 148 | + return sparse[:, h1:h2, w1:w2], mask[:, h1:h2, w1:w2] |
| 149 | + else: |
| 150 | + return sparse, mask |
| 151 | + |
| 152 | + def downsize( |
| 153 | + self, min_preview_size=256, colorspace=None, clip=False, safe_crop=0 |
| 154 | + ) -> np.ndarray: |
| 155 | + _, H, W = self.camera_linear.shape |
| 156 | + W_steps, H_steps = H // min_preview_size - 1, W // min_preview_size - 1 |
| 157 | + steps = min(W_steps, H_steps) |
| 158 | + c_first_linear = self.apply_colorspace_transform( |
| 159 | + colorspace=colorspace, clip=clip, safe_crop=safe_crop |
| 160 | + )[0] |
| 161 | + c_first_linear = c_first_linear[:, ::steps, ::steps] |
| 162 | + return c_first_linear |
| 163 | + |
| 164 | + def generate_thumbnail( |
| 165 | + self, |
| 166 | + min_preview_size=256, |
| 167 | + colorspace=None, |
| 168 | + clip=False, |
| 169 | + safe_crop=0, |
| 170 | + ) -> np.ndarray: |
| 171 | + c_first_linear = self.downsize( |
| 172 | + min_preview_size=min_preview_size, |
| 173 | + colorspace=colorspace, |
| 174 | + clip=clip, |
| 175 | + safe_crop=safe_crop, |
| 176 | + ) |
| 177 | + return c_first_linear |
| 178 | + |
| 179 | + def as_rgb( |
| 180 | + self, |
| 181 | + colorspace=None, |
| 182 | + dims=None, |
| 183 | + clip=False, |
| 184 | + safe_crop=0, |
| 185 | + ) -> np.ndarray: |
| 186 | + c_first_linear = self.apply_colorspace_transform( |
| 187 | + colorspace=colorspace, dims=dims, safe_crop=safe_crop |
| 188 | + ) |
| 189 | + if clip: |
| 190 | + c_first_linear = np.clip(c_first_linear, 0, 1) |
| 191 | + return c_first_linear |
| 192 | + |
| 193 | + def as_sparse( |
| 194 | + self, |
| 195 | + colorspace=None, |
| 196 | + dims=None, |
| 197 | + clip=False, |
| 198 | + safe_crop=0, |
| 199 | + pattern="RGGB", |
| 200 | + cfa_type="bayer", |
| 201 | + ) -> np.ndarray: |
| 202 | + c_first_linear = self.apply_colorspace_transform( |
| 203 | + colorspace=colorspace, dims=dims, safe_crop=safe_crop |
| 204 | + ) |
| 205 | + sparse = sparse_representation_three_channel( |
| 206 | + c_first_linear, pattern=pattern, cfa_type=cfa_type |
| 207 | + ) |
| 208 | + if clip: |
| 209 | + sparse = np.clip(sparse, 0, 1) |
| 210 | + return sparse |
| 211 | + |
| 212 | + def as_cfa(self, **kwargs) -> np.ndarray: |
| 213 | + sparse = self.as_sparse(**kwargs) |
| 214 | + return sparse.sum(axis=0, keepdims=True) |
| 215 | + |
| 216 | + def as_rggb(self, cfa_type="bayer", **kwargs) -> np.ndarray: |
| 217 | + cfa = self.as_CFA(**kwargs) |
| 218 | + if cfa_type == "bayer": |
| 219 | + rggb = pixel_unshuffle(cfa, 2) |
| 220 | + else: |
| 221 | + rggb = pixel_unshuffle(cfa, 6) |
| 222 | + return rggb |
| 223 | + |
| 224 | + def to_dng(self, filepath, uint_img=None): |
| 225 | + try: |
| 226 | + to_dng(self, filepath, uint_img=uint_img) |
| 227 | + return True |
| 228 | + except Exception as e: |
| 229 | + print(e) |
| 230 | + return False |
| 231 | + |
| 232 | + |
class RawHandlerRawpy:
    """
    Factory for BaseRawHandlerRawpy instances built from a raw file on disk.

    Opens the file with rawpy, collects the core processing metadata into a
    CoreRawMetadata tuple, attaches the exiv2-backed metadata handler, and
    returns the assembled BaseRawHandlerRawpy (note: __new__ deliberately
    returns an instance of a different class).

    Args:
        path (string): Path to raw file.
    """

    def __new__(cls, path: str, **kwargs):
        # rawpy supplies both the pixel data and the core processing metadata.
        raw = rawpy.imread(path)
        sizes = raw.sizes

        # Gather the essentials BaseRawHandlerRawpy's processing logic needs.
        core = CoreRawMetadata(
            black_level_per_channel=raw.black_level_per_channel,
            white_level=raw.white_level,
            rgb_xyz_matrix=raw.rgb_xyz_matrix,
            raw_pattern=raw.raw_pattern,
            camera_white_balance=np.array(raw.camera_whitebalance),
            iheight=sizes.iheight,
            iwidth=sizes.iwidth,
        )

        # General EXIF/XMP metadata comes from the exiv2 wrapper.
        return BaseRawHandlerRawpy(
            rawpy_object=raw,
            core_metadata=core,
            full_metadata=MetaDataHandler(path),
            **kwargs,
        )
0 commit comments