first commit
parent 53d1e11f30
commit 18875d738a
@@ -0,0 +1,408 @@
"""Image processor class for WD Tagger."""

from typing import Optional, List, Dict, Union, Tuple

import numpy as np
from PIL import Image

from transformers.image_processing_utils import (
    BaseImageProcessor,
    BatchFeature,
    get_size_dict,
)
from transformers.image_transforms import (
    rescale,
    to_channel_dimension_format,
    _rescale_for_pil_conversion,
    to_pil_image,
)
from transformers.image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from transformers.utils import TensorType, logging


logger = logging.get_logger(__name__)


def resize_with_padding(
    image: np.ndarray,
    size: Tuple[int, int],
    color: Tuple[int, int, int],
    resample: PILImageResampling = None,
    reducing_gap: Optional[int] = None,
    data_format: Optional[ChannelDimension] = None,
    return_numpy: bool = True,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
    """
    Resizes `image` to the `(height, width)` specified by `size` using the PIL library, keeping the aspect ratio
    and padding the remaining area with `color`.

    Args:
        image (`np.ndarray`):
            The image to resize.
        size (`Tuple[int, int]`):
            The `(height, width)` to use for resizing the image.
        color (`Tuple[int, int, int]`):
            The RGB color to use for padding the image.
        resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            The filter to use for resampling.
        reducing_gap (`int`, *optional*):
            Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to
            the fair resampling. See corresponding Pillow documentation for more details.
        data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the output image. If unset, will use the inferred format from the input.
        return_numpy (`bool`, *optional*, defaults to `True`):
            Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is
            returned.
        input_data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the input image. If unset, will use the inferred format from the input.

    Returns:
        `np.ndarray`: The resized image.
    """

    resample = resample if resample is not None else PILImageResampling.BILINEAR

    if not len(size) == 2:
        raise ValueError("size must have 2 elements")

    # For all transformations, we want to keep the same data format as the input image unless otherwise specified.
    # The resized image from PIL will always have channels last, so find the input format first.
    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(image)
    data_format = input_data_format if data_format is None else data_format

    # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use
    # the pillow library to resize the image and then convert back to numpy
    do_rescale = False
    if not isinstance(image, Image.Image):
        do_rescale = _rescale_for_pil_conversion(image)
        image = to_pil_image(
            image, do_rescale=do_rescale, input_data_format=input_data_format
        )

    assert isinstance(image, Image.Image)

    height, width = size
    # PIL images report their size in (width, height) order
    original_width, original_height = image.size

    # scaling ratio that fits the image inside the target size while keeping the aspect ratio
    ratio = min(width / original_width, height / original_height)

    # rescale and keep aspect ratio
    new_width = int(original_width * ratio)
    new_height = int(original_height * ratio)

    resized_image = image.resize(
        (new_width, new_height), resample=resample, reducing_gap=reducing_gap
    )

    # solid background; `Image.new` expects (width, height)
    new_image = Image.new("RGBA", (width, height), tuple(color) + (255,))

    # paste resized image at the center
    offset = ((width - new_width) // 2, (height - new_height) // 2)
    new_image.paste(
        resized_image.convert("RGBA"), offset, resized_image.convert("RGBA")
    )

    new_image = new_image.convert("RGB")

    # Convert to numpy array
    image_array = np.asarray(new_image, dtype=np.float32)

    # Convert PIL-native RGB to BGR
    image_array = image_array[:, :, ::-1]

    new_image = Image.fromarray(image_array.astype(np.uint8))

    if return_numpy:
        new_image = np.array(new_image)
        # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image
        # so we need to add it back if necessary.
        new_image = (
            np.expand_dims(new_image, axis=-1) if new_image.ndim == 2 else new_image
        )
        # The image is always in channels last format after converting from a PIL image
        new_image = to_channel_dimension_format(
            new_image, data_format, input_channel_dim=ChannelDimension.LAST
        )
        # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to
        # rescale it back to the original range.
        new_image = rescale(new_image, 1 / 255) if do_rescale else new_image

    return new_image
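

# For intuition, a worked example of the letterboxing above, assuming a 224x112 RGB input
# and the default 448x448 target (the numbers are illustrative, not taken from the code):
#   ratio    = min(448 / 224, 448 / 112) = 2.0
#   new size = (448, 224)                               # (new_width, new_height)
#   offset   = ((448 - 448) // 2, (448 - 224) // 2) = (0, 112)
# The resized image is pasted 112 px from the top, the remaining rows are filled with
# `color`, and the channels are finally swapped from RGB to BGR.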


class WDTaggerImageProcessor(BaseImageProcessor):
    r"""
    Constructs a WD Tagger image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
            size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `{"height": 448, "width": 448}`):
            Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        color (`List[int]`, *optional*, defaults to `[255, 255, 255]`):
            Color to use for padding the image after resizing. Can be overridden by the `color` parameter in the
            `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
            parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        color: Optional[List[int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 448, "width": 448}
        size = get_size_dict(size)
        color = color if color is not None else [255, 255, 255]
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.size = size
        self.color = color
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = (
            image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        )
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        color: List[int] = [255, 255, 255],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to `(size["height"], size["width"])`, padding with `color` to preserve the aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            color (`List[int]`, *optional*, defaults to `[255, 255, 255]`):
                RGB color to use for the padding.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            `np.ndarray`: The resized image.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(
                f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}"
            )

        output_size = (size["height"], size["width"])

        color = tuple(color)

        return resize_with_padding(
            image,
            size=output_size,
            color=color,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        color: Optional[List[int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
                resizing.
            color (`List[int]`, *optional*, defaults to `self.color`):
                RGB color to use for padding the resized image. Only has an effect if `do_resize` is set to `True`.
            resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
                `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
                an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        resample = resample if resample is not None else self.resample
        rescale_factor = (
            rescale_factor if rescale_factor is not None else self.rescale_factor
        )
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        color = color if color is not None else self.color

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        if do_resize:
            images = [
                self.resize(
                    image=image,
                    size=size_dict,
                    color=color,
                    resample=resample,
                    input_data_format=input_data_format,
                )
                for image in images
            ]

        if do_rescale:
            images = [
                self.rescale(
                    image=image,
                    scale=rescale_factor,
                    input_data_format=input_data_format,
                )
                for image in images
            ]

        if do_normalize:
            images = [
                self.normalize(
                    image=image,
                    mean=image_mean,
                    std=image_std,
                    input_data_format=input_data_format,
                )
                for image in images
            ]

        images = [
            to_channel_dimension_format(
                image, data_format, input_channel_dim=input_data_format
            )
            for image in images
        ]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
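
A minimal usage sketch for this processor, assuming the files in this commit are pushed to a model repository (the repo id below is a placeholder; `trust_remote_code=True` is needed because the class ships with the repository rather than with transformers):

from PIL import Image
from transformers import AutoImageProcessor

# "user/wd-tagger" is a placeholder model id, not the actual repository name.
processor = AutoImageProcessor.from_pretrained("user/wd-tagger", trust_remote_code=True)

image = Image.open("example.jpg")
inputs = processor(images=image, return_tensors="np")
# (1, 3, 448, 448) with the default size; channel order is BGR, as produced by resize_with_padding.
print(inputs["pixel_values"].shape)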
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,37 @@
{
  "one_external_file": true,
  "opset": null,
  "optimization": {},
  "optimum_version": "1.17.1",
  "quantization": {
    "activations_dtype": "QUInt8",
    "activations_symmetric": false,
    "format": "QOperator",
    "is_static": false,
    "mode": "IntegerOps",
    "nodes_to_exclude": [
      "/swinv2/embeddings/patch_embeddings/projection/Conv"
    ],
    "nodes_to_quantize": [],
    "operators_to_quantize": [
      "Conv",
      "MatMul",
      "Attention",
      "LSTM",
      "Gather",
      "Transpose",
      "EmbedLayerNormalization"
    ],
    "per_channel": true,
    "qdq_add_pair_to_weight": false,
    "qdq_dedicated_pair": false,
    "qdq_op_type_per_channel_support_to_axis": {
      "MatMul": 1
    },
    "reduce_range": false,
    "weights_dtype": "QInt8",
    "weights_symmetric": true
  },
  "transformers_version": "4.38.1",
  "use_external_data_format": false
}
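
This is the kind of record optimum writes after dynamic quantization of an ONNX export. A rough sketch of how a similar configuration could be produced with optimum's ORTQuantizer, assuming the exported model.onnx sits in a local directory (paths are placeholders and the exact options used for this commit are not shown here):

from optimum.onnxruntime import ORTQuantizer
from optimum.onnxruntime.configuration import AutoQuantizationConfig

# Dynamic quantization roughly matching the settings above: QOperator format,
# QUInt8 activations, symmetric QInt8 weights, per-channel, reduce_range off.
qconfig = AutoQuantizationConfig.avx512(is_static=False, per_channel=True)
qconfig.nodes_to_exclude = ["/swinv2/embeddings/patch_embeddings/projection/Conv"]

quantizer = ORTQuantizer.from_pretrained("onnx_export")  # placeholder directory holding model.onnx
quantizer.quantize(save_dir="onnx_quantized", quantization_config=qconfig)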
@@ -0,0 +1,30 @@
{
  "image_processor_type": "WDTaggerImageProcessor",
  "auto_map": {
    "AutoImageProcessor": "image_processing_tagger.WDTaggerImageProcessor"
  },
  "color": [
    255,
    255,
    255
  ],
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.5,
    0.5,
    0.5
  ],
  "image_std": [
    0.5,
    0.5,
    0.5
  ],
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "height": 448,
    "width": 448
  }
}
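
Reading the config above: `"resample": 3` is PIL's BICUBIC filter, `rescale_factor` is exactly 1/255, and the 0.5 mean/std normalization maps pixel values into [-1, 1]. A tiny check, assuming Pillow >= 9.1 for the Resampling enum:

from PIL import Image

print(int(Image.Resampling.BICUBIC))                  # 3, i.e. the "resample" value above
print(1 / 255)                                         # 0.00392156862745098, the "rescale_factor" value
print((255 / 255 - 0.5) / 0.5, (0 / 255 - 0.5) / 0.5)  # 1.0 -1.0, the range after normalization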
Binary file not shown.
Image file added (478 KiB).