import torch
import numpy as np
from typing import TYPE_CHECKING, Any, Optional, Union, Tuple
from transformers.audio_utils import mel_filter_bank, spectrogram, window_function
from transformers.feature_extraction_sequence_utils import SequenceFeatureExtractor
from transformers.feature_extraction_utils import BatchFeature
from transformers.utils import TensorType


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Extracts Whisper-style log-mel spectrogram features from raw 16 kHz audio."""

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        dither=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.dither = dither
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _torch_extract_fbank_features(self, waveform: np.ndarray, device: str = "cpu") -> np.ndarray:
        waveform = torch.from_numpy(waveform).to(device, torch.float32)
        window = torch.hann_window(self.n_fft, device=device)

        # Optional dithering: add low-level noise to the waveform before the STFT.
        if self.dither != 0.0:
            waveform += self.dither * torch.randn(waveform.shape, dtype=waveform.dtype, device=waveform.device)

        stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)
        # Drop the last STFT frame so the output has exactly L // hop_length frames.
        magnitudes = stft[..., :-1].abs() ** 2

        mel_filters = torch.from_numpy(self.mel_filters).to(device, torch.float32)
        mel_spec = mel_filters.T @ magnitudes

        # Log-mel spectrogram, clipped to within 8 log10 units of the maximum and
        # rescaled, as in the original Whisper preprocessing.
        log_spec = torch.clamp(mel_spec, min=1e-10).log10()
        if waveform.dim() == 2:
            max_val = log_spec.max(dim=2, keepdim=True)[0].max(dim=1, keepdim=True)[0]
            log_spec = torch.maximum(log_spec, max_val - 8.0)
        else:
            log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        if device != "cpu":
            log_spec = log_spec.detach().cpu()
        return log_spec.numpy()

| 74 | + # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2. |
| 75 | + # Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm |
| 76 | + def zero_mean_unit_var_norm( |
| 77 | + self, input_values: list[np.ndarray], attention_mask: list[np.ndarray], padding_value: float = 0.0 |
| 78 | + ) -> list[np.ndarray]: |
| 79 | + if attention_mask is not None: |
| 80 | + attention_mask = np.array(attention_mask, np.int32) |
| 81 | + normed_input_values = [] |
| 82 | + |
| 83 | + for vector, length in zip(input_values, attention_mask.sum(-1)): |
| 84 | + normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7) |
| 85 | + if length < normed_slice.shape[0]: |
| 86 | + normed_slice[length:] = padding_value |
| 87 | + |
| 88 | + normed_input_values.append(normed_slice) |
| 89 | + else: |
| 90 | + normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values] |
| 91 | + |
| 92 | + return normed_input_values |
| 93 | + |
    def _preprocess(
        self,
        raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "longest",  # "max_length" pads inputs to max_length
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = 16000,
        do_normalize: Optional[bool] = None,
        device: Optional[str] = "cpu",
        return_token_timestamps: Optional[bool] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Pad and optionally normalize raw audio, then extract log-mel features."""
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

| 141 | + # zero-mean and unit-variance normalization |
| 142 | + if do_normalize: |
| 143 | + padded_inputs["input_features"] = self.zero_mean_unit_var_norm( |
| 144 | + padded_inputs["input_features"], |
| 145 | + attention_mask=padded_inputs["attention_mask"], |
| 146 | + padding_value=self.padding_value, |
| 147 | + ) |
| 148 | + padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0) |
| 149 | + |
| 150 | + # make sure list is in array format |
| 151 | + input_features = padded_inputs.get("input_features").transpose(2, 0, 1) |
| 152 | + |
| 153 | + input_features = self._torch_extract_fbank_features(input_features[0], device) |
| 154 | + |
| 155 | + if isinstance(input_features[0], list): |
| 156 | + padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features] |
| 157 | + |
| 158 | + else: |
| 159 | + padded_inputs["input_features"] = input_features |
| 160 | + |
        if return_attention_mask:
            # rescale from sample resolution (480000) to feature resolution (3000)
            rescaled_attention_mask = padded_inputs["attention_mask"][:, :: self.hop_length]

            # The STFT computation produces L // hop_length + 1 frames, but we skip the
            # last frame (see `_torch_extract_fbank_features`). This means we need to trim
            # the rescaled attention mask to match the actual number of frames
            # (L // hop_length) when the input length is not perfectly divisible by the
            # hop length.
            if padded_inputs["attention_mask"].shape[1] % self.hop_length != 0:
                rescaled_attention_mask = rescaled_attention_mask[:, :-1]
            padded_inputs["attention_mask"] = rescaled_attention_mask

        if return_token_timestamps is not None:
            padded_inputs["num_frames"] = [len(raw_speech_i) // self.hop_length for raw_speech_i in raw_speech]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        # The outputs are always placed on CUDA: bfloat16 features and an int32 attention mask.
        input_features = torch.from_numpy(np.asarray(padded_inputs["input_features"], dtype=np.float32)).to(
            device="cuda", dtype=torch.bfloat16
        )
        attention_mask = torch.from_numpy(np.asarray(padded_inputs["attention_mask"], dtype=np.float32)).to(
            device="cuda", dtype=torch.int32
        )
        return input_features, attention_mask
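

# Illustrative usage sketch (not part of the original commit): a quick sanity check of
# the log-mel extraction path. A 30 s chunk of silence at 16 kHz yields the canonical
# (80, 3000) Whisper feature matrix. The full `_preprocess` entry point additionally
# pads/normalizes batches and moves its outputs to CUDA as bfloat16 features with an
# int32 attention mask, so running it end to end requires a GPU.
if __name__ == "__main__":
    extractor = WhisperFeatureExtractor()
    silence = np.zeros(extractor.n_samples, dtype=np.float32)  # 30 s of silence at 16 kHz
    features = extractor._torch_extract_fbank_features(silence, device="cpu")
    print(features.shape)  # (80, 3000)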