commit d0478c8ed3946c00b629bd1b6ffca15c21326295
Author: ailab
Date:   Sat Jun 8 01:43:43 2024 +0800

    first commit

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..a6344aa
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..5c54bdd
--- /dev/null
+++ b/README.md
@@ -0,0 +1,157 @@
+---
+license: openrail++
+base_model: stabilityai/stable-diffusion-xl-base-1.0
+tags:
+- stable-diffusion-xl
+- stable-diffusion-xl-diffusers
+- text-to-image
+- diffusers
+- controlnet
+inference: false
+---
+
+# SDXL-controlnet: Zoe-Depth
+
+These are ControlNet weights trained on stabilityai/stable-diffusion-xl-base-1.0 with Zoe-Depth conditioning. [Zoe-Depth](https://github.com/isl-org/ZoeDepth) is an open-source, state-of-the-art depth estimation model that produces high-quality depth maps, which are well suited for conditioning.
+
+You can find some example images below.
+
+![images_0](./zoe-depth-example.png)
+
+![images_2](./zoe-megatron.png)
+
+![images_3](./photo-woman.png)
+
+## Usage
+
+First, make sure to install the required libraries:
+
+```bash
+pip install accelerate transformers safetensors diffusers
+```
+
+Then set up the Zoe-Depth model:
+
+```python
+import torch
+import matplotlib
+import matplotlib.cm
+import numpy as np
+from PIL import Image
+
+torch.hub.help("intel-isl/MiDaS", "DPT_BEiT_L_384", force_reload=True)  # Triggers a fresh download of the MiDaS repo
+model_zoe_n = torch.hub.load("isl-org/ZoeDepth", "ZoeD_NK", pretrained=True).eval()
+model_zoe_n = model_zoe_n.to("cuda")
+
+
+def colorize(value, vmin=None, vmax=None, cmap='gray_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):
+    """Convert a raw depth map (tensor or array) into a colorized PIL image."""
+    if isinstance(value, torch.Tensor):
+        value = value.detach().cpu().numpy()
+
+    value = value.squeeze()
+    if invalid_mask is None:
+        invalid_mask = value == invalid_val
+    mask = np.logical_not(invalid_mask)
+
+    # Normalize to the 2nd..85th percentile of the valid values
+    vmin = np.percentile(value[mask], 2) if vmin is None else vmin
+    vmax = np.percentile(value[mask], 85) if vmax is None else vmax
+    if vmin != vmax:
+        value = (value - vmin) / (vmax - vmin)  # vmin..vmax
+    else:
+        # Avoid 0-division
+        value = value * 0.
+
+    # Grey out the invalid values
+    value[invalid_mask] = np.nan
+    cmapper = matplotlib.cm.get_cmap(cmap)  # removed in Matplotlib >= 3.9; use matplotlib.colormaps[cmap] there
+    if value_transform:
+        value = value_transform(value)
+    value = cmapper(value, bytes=True)  # (n, m, 4) uint8 RGBA
+
+    img = value[...]
+    img[invalid_mask] = background_color
+
+    # Gamma correction (applied unconditionally here, as in the original snippet;
+    # the gamma_corrected flag is unused)
+    img = img / 255
+    img = np.power(img, 2.2)
+    img = img * 255
+    img = img.astype(np.uint8)
+    img = Image.fromarray(img)
+    return img
+
+
+def get_zoe_depth_map(image):
+    with torch.autocast("cuda", enabled=True):
+        depth = model_zoe_n.infer_pil(image)
+        depth = colorize(depth, cmap="gray_r")
+    return depth
+```
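+
+Before wiring up the full SDXL pipeline, it can be worth previewing the conditioning image on its own. The snippet below is an optional sanity check, not part of the original example: it reuses the input image from the inference section further down, and the output filename is arbitrary.
+
+```python
+from diffusers.utils import load_image
+
+# Same example input image as in the inference section below
+image = load_image("https://media.vogue.fr/photos/62bf04b69a57673c725432f3/3:2/w_1793,h_1195,c_limit/rev-1-Barbie-InstaVert_High_Res_JPEG.jpeg")
+
+# Run the helper defined above and inspect the grayscale depth map
+depth_image = get_zoe_depth_map(image)
+depth_image.save("depth-preview.png")  # arbitrary output path, just for inspection
+```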
+
+Now we're ready to go:
+
+```python
+import torch
+import numpy as np
+from PIL import Image
+
+from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL
+from diffusers.utils import load_image
+
+controlnet = ControlNetModel.from_pretrained(
+    "diffusers/controlnet-zoe-depth-sdxl-1.0",
+    use_safetensors=True,
+    torch_dtype=torch.float16,
+).to("cuda")
+vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16).to("cuda")
+pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-xl-base-1.0",
+    controlnet=controlnet,
+    vae=vae,
+    variant="fp16",
+    use_safetensors=True,
+    torch_dtype=torch.float16,
+)
+pipe.enable_model_cpu_offload()  # handles device placement, so the pipeline is not moved to CUDA manually
+
+
+prompt = "pixel-art margot robbie as barbie, in a coupé . low-res, blocky, pixel art style, 8-bit graphics"
+negative_prompt = "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic"
+image = load_image("https://media.vogue.fr/photos/62bf04b69a57673c725432f3/3:2/w_1793,h_1195,c_limit/rev-1-Barbie-InstaVert_High_Res_JPEG.jpeg")
+
+controlnet_conditioning_scale = 0.55
+
+depth_image = get_zoe_depth_map(image).resize((1088, 896))
+
+generator = torch.Generator("cuda").manual_seed(978364352)
+images = pipe(
+    prompt,
+    negative_prompt=negative_prompt,
+    image=depth_image,
+    num_inference_steps=50,
+    controlnet_conditioning_scale=controlnet_conditioning_scale,
+    generator=generator,
+).images
+
+images[0].save("pixel-barbie.png")
+```
+
+![images_1](./barbie.png)
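+
+`controlnet_conditioning_scale` trades off how strictly the output follows the depth map against how freely the prompt is interpreted. If you want to tune it, a small sweep like the sketch below works well; it assumes `pipe`, `prompt`, `negative_prompt`, and `depth_image` are still defined from the example above, and the scale values and filenames are just illustrative.
+
+```python
+# Fix the seed so that only the conditioning scale varies between runs
+for scale in (0.4, 0.55, 0.7):
+    generator = torch.Generator("cuda").manual_seed(978364352)
+    image = pipe(
+        prompt,
+        negative_prompt=negative_prompt,
+        image=depth_image,
+        num_inference_steps=50,
+        controlnet_conditioning_scale=scale,
+        generator=generator,
+    ).images[0]
+    image.save(f"pixel-barbie-scale-{scale}.png")
+```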
+
+For more details, check out the official documentation of [`StableDiffusionXLControlNetPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/controlnet_sdxl).
+
+### Training
+
+Our training script was built on top of the official training script that we provide [here](https://github.com/huggingface/diffusers/blob/main/examples/controlnet/README_sdxl.md).
+
+#### Training data and compute
+The model was trained on 3M image-text pairs from LAION-Aesthetics V2, for 700 GPU hours on 80GB A100 GPUs.
+
+#### Batch size
+Data-parallel training with a per-GPU batch size of 8, for a total batch size of 256.
+
+#### Hyperparameters
+Constant learning rate of 1e-5.
+
+#### Mixed precision
+fp16
\ No newline at end of file
diff --git a/barbie.png b/barbie.png
new file mode 100644
index 0000000..5d6a3c7
Binary files /dev/null and b/barbie.png differ
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..aba54b3
--- /dev/null
+++ b/config.json
@@ -0,0 +1,57 @@
+{
+  "_class_name": "ControlNetModel",
+  "_diffusers_version": "0.20.0.dev0",
+  "_name_or_path": "valhalla/zoe-depth",
+  "act_fn": "silu",
+  "addition_embed_type": "text_time",
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": 256,
+  "attention_head_dim": [
+    5,
+    10,
+    20
+  ],
+  "block_out_channels": [
+    320,
+    640,
+    1280
+  ],
+  "class_embed_type": null,
+  "conditioning_channels": 3,
+  "conditioning_embedding_out_channels": [
+    16,
+    32,
+    96,
+    256
+  ],
+  "controlnet_conditioning_channel_order": "rgb",
+  "cross_attention_dim": 2048,
+  "down_block_types": [
+    "DownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "global_pool_conditions": false,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_scale_factor": 1,
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "projection_class_embeddings_input_dim": 2816,
+  "resnet_time_scale_shift": "default",
+  "transformer_layers_per_block": [
+    1,
+    2,
+    10
+  ],
+  "upcast_attention": null,
+  "use_linear_projection": true
+}
diff --git a/diffusion_pytorch_model.safetensors b/diffusion_pytorch_model.safetensors
new file mode 100644
index 0000000..d59db73
--- /dev/null
+++ b/diffusion_pytorch_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cd9dad0c25b90a548f2598b575505884d9b535ad17f3e13b8a1701554200ecd
+size 2502139134
diff --git a/photo-woman.png b/photo-woman.png
new file mode 100644
index 0000000..e195748
Binary files /dev/null and b/photo-woman.png differ
diff --git a/zoe-depth-example.png b/zoe-depth-example.png
new file mode 100644
index 0000000..c0cc5a3
Binary files /dev/null and b/zoe-depth-example.png differ
diff --git a/zoe-megatron.png b/zoe-megatron.png
new file mode 100644
index 0000000..aef6d99
Binary files /dev/null and b/zoe-megatron.png differ