1. docker pull
docker pull pytorch/pytorch:2.0.1-cuda11.7-cudnn8-devel
2. docker run
docker run -itd --gpus all --restart always --name dift -v /data:/usr/src/data pytorch/pytorch:2.0.1-cuda11.7-cudnn8-devel /bin/bash
# or, if the data sits on a Windows drive mounted under WSL:
docker run -itd --gpus all --restart always --name dift -v /mnt/d/data:/usr/src/data pytorch/pytorch:2.0.1-cuda11.7-cudnn8-devel /bin/bash
3. docker attach
docker attach dift
# detach without stopping the container: Ctrl+P, then Ctrl+Q
4. git clone
apt-get update
apt-get install -y git
git clone https://github.com/Tsingularity/dift
cd dift
conda env create -f environment.yml
conda activate dift
pip install diffusers
pip install matplotlib
pip install transformers==4.28.0
pip install huggingface-hub==0.24.0
# torch version setting
pip uninstall torch -y
conda install pytorch=2.0.1 torchvision=0.15.2 torchaudio=2.0.2 pytorch-cuda=11.7 -c pytorch -c nvidia
pip install --upgrade torch torchvision
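Before moving on, it is worth confirming which torch build is actually active and that the GPU is visible from inside the container; a minimal sanity check (not part of the original steps):
import torch
print(torch.__version__)          # installed PyTorch version
print(torch.version.cuda)         # CUDA version the wheel was built against (None for CPU-only builds)
print(torch.cuda.is_available())  # should be True if --gpus all took effect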
5. run dift
python extract_dift.py --input_path ./assets/cat.png --output_path dift_cat.pt --img_size 768 768 --t 261 --up_ft_index 1 --prompt 'a photo of a cat' --ensemble_size 8
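Once the command finishes, the saved feature can be loaded back for a quick look; a minimal sketch, assuming extract_dift.py stores the feature tensor with torch.save (the exact shape depends on --img_size and --up_ft_index):
import torch
ft = torch.load('dift_cat.pt', map_location='cpu')  # feature written by extract_dift.py
print(type(ft))
print(getattr(ft, 'shape', None))  # e.g. a [c, h, w] feature map for the single input image
The rest of this post walks through the featurizer code itself and a point-matching demo built on top of it.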
from diffusers import StableDiffusionPipeline
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
from typing import Any, Callable, Dict, List, Optional, Union
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from diffusers import DDIMScheduler
import gc
import os
from PIL import Image
from torchvision.transforms import PILToTensor
class MyUNet2DConditionModel(UNet2DConditionModel):
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
up_ft_indices,
encoder_hidden_states: torch.Tensor,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None):
r"""
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
`self.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
"""
# By default samples have to be at least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
# logger.info("Forward upsample size to force interpolation output size.")
forward_upsample_size = True
# prepare attention_mask
if attention_mask is not None:
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=self.dtype)
emb = self.time_embedding(t_emb, timestep_cond)
if self.class_embedding is not None:
if class_labels is None:
raise ValueError("class_labels should be provided when num_class_embeds > 0")
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
emb = emb + class_emb
# 2. pre-process
sample = self.conv_in(sample)
# 3. down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
down_block_res_samples += res_samples
# 4. mid
if self.mid_block is not None:
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
)
# 5. up
up_ft = {}
for i, upsample_block in enumerate(self.up_blocks):
if i > np.max(up_ft_indices):
break
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
upsample_size=upsample_size,
attention_mask=attention_mask,
)
else:
sample = upsample_block(
hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
)
if i in up_ft_indices:
up_ft[i] = sample.detach()
output = {}
output['up_ft'] = up_ft
return output
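# Note (added): the dict returned above maps each requested up-block index to that block's
# output feature map, detached from the graph; larger indices correspond to later,
# higher-resolution up blocks of the U-Net.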
class OneStepSDPipeline(StableDiffusionPipeline):
@torch.no_grad()
def __call__(
self,
img_tensor,
t,
up_ft_indices,
negative_prompt: Optional[Union[str, List[str]]] = None,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None
):
device = self._execution_device
latents = self.vae.encode(img_tensor).latent_dist.sample() * self.vae.config.scaling_factor
t = torch.tensor(t, dtype=torch.long, device=device)
noise = torch.randn_like(latents).to(device)
latents_noisy = self.scheduler.add_noise(latents, noise, t)
unet_output = self.unet(latents_noisy,
t,
up_ft_indices,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs)
return unet_output
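# Note (added): this pipeline skips the usual denoising loop. It encodes the image into
# latents with the VAE, adds noise for the single timestep t via the scheduler, and runs
# one UNet forward pass only to read out the intermediate up-block features.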
class SDFeaturizer:
def __init__(self, sd_id='stabilityai/stable-diffusion-2-1', null_prompt=''):
unet = MyUNet2DConditionModel.from_pretrained(sd_id, subfolder="unet")
onestep_pipe = OneStepSDPipeline.from_pretrained(sd_id, unet=unet, safety_checker=None)
onestep_pipe.vae.decoder = None
onestep_pipe.scheduler = DDIMScheduler.from_pretrained(sd_id, subfolder="scheduler")
gc.collect()
onestep_pipe = onestep_pipe.to("cuda")
onestep_pipe.enable_attention_slicing()
# onestep_pipe.enable_xformers_memory_efficient_attention()
null_prompt_embeds = onestep_pipe._encode_prompt(
prompt=null_prompt,
device='cuda',
num_images_per_prompt=1,
do_classifier_free_guidance=False) # [1, 77, dim]
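# Note (added): on recent diffusers releases _encode_prompt is deprecated in favor of
# encode_prompt (which returns a tuple of embeddings), so this call may emit a warning.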
self.null_prompt_embeds = null_prompt_embeds
self.null_prompt = null_prompt
self.pipe = onestep_pipe
@torch.no_grad()
def forward(self,
img_tensor,
prompt='',
t=261,
up_ft_index=1,
ensemble_size=8):
'''
Args:
img_tensor: should be a single torch tensor in the shape of [1, C, H, W] or [C, H, W]
prompt: the prompt to use, a string
t: the time step to use, should be an int in the range of [0, 1000]
up_ft_index: which upsampling block of the U-Net to extract feature, you can choose [0, 1, 2, 3]
ensemble_size: the number of repeated images used in the batch to extract features
Return:
unet_ft: a torch tensor in the shape of [1, c, h, w]
'''
img_tensor = img_tensor.repeat(ensemble_size, 1, 1, 1).cuda() # ensem, c, h, w
if prompt == self.null_prompt:
prompt_embeds = self.null_prompt_embeds
else:
prompt_embeds = self.pipe._encode_prompt(
prompt=prompt,
device='cuda',
num_images_per_prompt=1,
do_classifier_free_guidance=False) # [1, 77, dim]
prompt_embeds = prompt_embeds.repeat(ensemble_size, 1, 1)
unet_ft_all = self.pipe(
img_tensor=img_tensor,
t=t,
up_ft_indices=[up_ft_index],
prompt_embeds=prompt_embeds)
unet_ft = unet_ft_all['up_ft'][up_ft_index] # ensem, c, h, w
unet_ft = unet_ft.mean(0, keepdim=True) # 1,c,h,w
return unet_ft
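# Note (added): the demo below runs SDFeaturizer on two images, upsamples both feature maps
# back to image resolution, and finds, for each query point on the first image, the most
# similar pixel on the second image via cosine similarity.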
import torch
from PIL import Image
from torchvision.transforms import PILToTensor
if __name__ == '__main__':
from PIL import Image, ImageDraw
# model_id
model_id = "sd-legacy/stable-diffusion-v1-5"
# define images and query points (x, y) in original-image coordinates
# example 1 (left commented out; it is overridden by the dog images below):
# img1 = Image.open('image1.png').convert('RGB')
# img2 = Image.open('image2.png').convert('RGB')
# point = [[477, 216]]
# example 2: two dog images with three query points
img1 = Image.open('dog1.jpg').convert('RGB')
img2 = Image.open('dog2.jpg').convert('RGB')
point = [[177, 158], [229, 177], [355, 193]]
w1_orig, h1_orig = img1.size
w2_orig, h2_orig = img2.size
# preprocessing
img1 = img1.resize((512,512))
img_tensor1 = (PILToTensor()(img1) / 255.0 - 0.5) * 2
img2 = img2.resize((512,512))
img_tensor2 = (PILToTensor()(img2) / 255.0 - 0.5) * 2
# scale query points from original-image coordinates to the 512x512 resized image
resized_points = []
for (x, y) in point:
x_resized = int(x * 512 / w1_orig)
y_resized = int(y * 512 / h1_orig)
resized_points.append([x_resized, y_resized])
point = resized_points
# ------------------------- display the image -------------------------
# draw on a copy of the image
img_draw = img1.copy()
draw = ImageDraw.Draw(img_draw)
# draw circles
for x, y in point:
r = 5 # radius
draw.ellipse((x - r, y - r, x + r, y + r), fill='red')
# visualize
plt.figure(figsize=(8, 8))
plt.imshow(img_draw)
plt.title("Points on images")
plt.axis('off')
plt.show()
# ------------------------- display the image -------------------------
with torch.no_grad():
dift = SDFeaturizer(model_id)
ft1 = dift.forward(img_tensor1,
prompt='',
t=261,
up_ft_index=1,
ensemble_size=8)
ft2 = dift.forward(img_tensor2,
prompt='',
t=261,
up_ft_index=1,
ensemble_size=8)
print("ft1.shape: ", ft1.shape)
print("ft2.shape: ", ft2.shape)
import torch.nn.functional as F
import einops
# upsample both feature maps to the resized image resolution so pixel coordinates can index them directly
source_features_upsampled = F.interpolate(ft1, tuple(reversed(img1.size)), mode="bilinear")
target_features_upsampled = F.interpolate(ft2, tuple(reversed(img2.size)), mode="bilinear")
source_points = torch.tensor(point).to(ft1.device).long()
source_point_feats = source_features_upsampled[0, :, source_points[:, 1], source_points[:, 0]].T[:, None]
target_features_upsampled_norm_flat = einops.rearrange(target_features_upsampled / target_features_upsampled.norm(p=2, dim=1, keepdim=True), '1 c h w -> c (h w)')
source_point_feats_norm = source_point_feats / source_point_feats.norm(p=2, dim=-1, keepdim=True)
target_feat_sims = einops.rearrange(source_point_feats_norm @ target_features_upsampled_norm_flat, 'b 1 (h w) -> b h w', h=target_features_upsampled.shape[-2])
matches = torch.stack(torch.unravel_index(einops.rearrange(target_feat_sims, "b h w -> b (h w)").argmax(dim=-1), target_feat_sims.shape[1:])).T
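# Note (added): for each query feature, cosine similarity against every target pixel is computed,
# argmax is taken over the flattened (h*w) similarity map, and unravel_index converts the flat
# index back to (row, col), i.e. (y, x). torch.unravel_index requires a recent PyTorch release.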
print("matches: ", matches)
# matches coordinates are stored in (y, x) order
import cv2
# PIL → OpenCV conversion (RGB → BGR)
img1_cv = cv2.cvtColor(np.array(img1), cv2.COLOR_RGB2BGR)
img2_cv = cv2.cvtColor(np.array(img2), cv2.COLOR_RGB2BGR)
# concatenate the two images side by side
h1, w1 = img1_cv.shape[:2]
h2, w2 = img2_cv.shape[:2]
combined = np.zeros((max(h1, h2), w1 + w2, 3), dtype=np.uint8)
combined[:h1, :w1] = img1_cv
combined[:h2, w1:] = img2_cv
# visualize the points
for (pt1, pt2) in zip(point, matches.tolist()):
pt1 = tuple(map(int, pt1))
# matches are (y, x); swap to (x, y) for OpenCV drawing
pt2 = tuple(map(int, [pt2[1], pt2[0]]))
pt2_offset = (pt2[0] + w1, pt2[1])  # img2 sits on the right, so shift x by the width of img1
# draw the points
cv2.circle(combined, pt1, 5, (0, 255, 0), -1)
cv2.circle(combined, pt2_offset, 5, (0, 0, 255), -1)
# draw the connecting line
cv2.line(combined, pt1, pt2_offset, (255, 0, 0), 2)
# BGR → RGB for matplotlib
combined_rgb = cv2.cvtColor(combined, cv2.COLOR_BGR2RGB)
# show the visualization
plt.figure(figsize=(16, 8))
plt.imshow(combined_rgb)
plt.title("Matched Points Visualization")
plt.axis('off')
plt.show()
# example output of matches for the single-point example: tensor([[152, 464]], device='cuda:0')
# draw matches on image1 and image2
# img1 = img1.convert('RGB')
# img2 = img2.convert('RGB')
# import matplotlib.pyplot as plt
# plt.imshow(img1)
# plt.scatter(point[0], point[1], color='red')
# plt.show()
# get corresponding point on image2
# using ft1 to get the feature map
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
import torch
import time
from diffusers import DDPMScheduler, DDIMScheduler
from huggingface_hub import login
login(token="hf_***")  # use your own Hugging Face access token; never publish a real one
# -------------------
# 0. set configs
# -------------------
model_id = "sd-legacy/stable-diffusion-v1-5"
# model_id = "runwayml/stable-diffusion-v1-5"
prompt = "A dog with sunglasses, wearing comfy hat, looking at camera, highly detailed, ultra sharp, cinematic, 100mm lens, 8k resolution."
# initialize the pipeline (only once)
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
# -------------------
# 1. Use the DDPM scheduler
# -------------------
pipe.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
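# Note (added): swapping the scheduler via from_config keeps the already-loaded UNet/VAE weights
# and only changes the sampling algorithm and its noise-schedule configuration.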
scheduler_name = 'ddpm'
num_steps = 10
start = time.time()
image = pipe(prompt, num_inference_steps=num_steps).images[0]
end = time.time()
elapsed_ms = int((end - start) * 1000)
filename = f"{prompt}_{scheduler_name}_{elapsed_ms}ms.png"
image.save(filename)
print(f"{scheduler_name} - {elapsed_ms}ms -> Saved as: {filename}")
# -------------------
# 2. Use the DDIM scheduler
# -------------------
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
scheduler_name = 'ddim'
num_steps = 50
start = time.time()
image = pipe(prompt, num_inference_steps=num_steps).images[0]
end = time.time()
elapsed_ms = int((end - start) * 1000)
filename = f"{prompt}_{scheduler_name}_{elapsed_ms}ms.png"
image.save(filename)
print(f"{scheduler_name} - {elapsed_ms}ms -> Saved as: {filename}")'Docker > Docker for Linux' 카테고리의 다른 글
| [Docker] OV-DINO 데모 실행 (docker이용) (7) | 2025.07.10 |
|---|---|
| [Docker] elem2design 만들기 (0) | 2025.07.03 |
| [Docker] Sam2.1 돌려보기 [linux] (0) | 2025.05.13 |
| [Docker] Linux docker 환경에서 a1111 실행해 보기 (0) | 2023.08.30 |
| [Linux Server] Docker 튜토리얼1 (Ubuntu server22.04.03 LTS 에서 pytorch(cuda11) 컨테이너 만들기.) (1) | 2023.08.01 |
댓글