From 462a9d329833fab67157c41c1ffb3f2d28bfb702 Mon Sep 17 00:00:00 2001
From: Andres Caicedo
Date: Mon, 8 May 2023 14:42:53 +0200
Subject: [PATCH] Reformatting

---
 ldm/models/diffusion/ddim.py                  |  3 +-
 ldm/models/diffusion/ddpm.py                  |  5 +-
 ldm/models/diffusion/plms.py                  |  2 +-
 ldm/modules/attention.py                      |  2 +-
 ldm/modules/diffusionmodules/openaimodel.py   |  4 +-
 ldm/modules/diffusionmodules/util.py          |  2 +-
 ldm/modules/image_degradation/bsrgan.py       |  6 +-
 ldm/modules/image_degradation/bsrgan_light.py |  6 +-
 ldm/modules/image_degradation/utils_image.py  | 76 ++++++++-----------
 ldm/modules/karlo/kakao/models/clip.py        |  2 +-
 .../modules/diffusion/gaussian_diffusion.py   |  6 +-
 ldm/modules/karlo/kakao/sampler.py            |  1 -
 ldm/modules/midas/midas/dpt_depth.py          |  1 -
 ldm/modules/midas/utils.py                    |  1 -
 requirements.txt                              |  2 +-
 15 files changed, 47 insertions(+), 72 deletions(-)

diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py
index 786dcf4..a0eb522 100644
--- a/ldm/models/diffusion/ddim.py
+++ b/ldm/models/diffusion/ddim.py
@@ -360,7 +360,7 @@ class DDIMSampler(object):
             raise NotImplementedError()
 
         # direction pointing to x_t
-        dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
+        dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t
         noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
         if noise_dropout > 0.0:
             noise = torch.nn.functional.dropout(noise, p=noise_dropout)
@@ -472,7 +472,6 @@ class DDIMSampler(object):
         use_original_steps=False,
         callback=None,
     ):
-
         timesteps = (
             np.arange(self.ddpm_num_timesteps)
             if use_original_steps
diff --git a/ldm/models/diffusion/ddpm.py b/ldm/models/diffusion/ddpm.py
index 6162192..75ad0e1 100644
--- a/ldm/models/diffusion/ddpm.py
+++ b/ldm/models/diffusion/ddpm.py
@@ -242,7 +242,7 @@ class DDPM(pl.LightningModule):
         )
 
         if self.parameterization == "eps":
-            lvlb_weights = self.betas ** 2 / (
+            lvlb_weights = self.betas**2 / (
                 2
                 * self.posterior_variance
                 * to_torch(alphas)
@@ -256,7 +256,7 @@ class DDPM(pl.LightningModule):
             )
         elif self.parameterization == "v":
             lvlb_weights = torch.ones_like(
-                self.betas ** 2
+                self.betas**2
                 / (
                     2
                     * self.posterior_variance
@@ -1358,7 +1358,6 @@ class LatentDiffusion(DDPM):
         start_T=None,
         log_every_t=None,
     ):
-
         if not log_every_t:
             log_every_t = self.log_every_t
         device = self.betas.device
diff --git a/ldm/models/diffusion/plms.py b/ldm/models/diffusion/plms.py
index cb6267f..f82e829 100644
--- a/ldm/models/diffusion/plms.py
+++ b/ldm/models/diffusion/plms.py
@@ -339,7 +339,7 @@ class PLMSSampler(object):
             if dynamic_threshold is not None:
                 pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
             # direction pointing to x_t
-            dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
+            dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t
             noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
             if noise_dropout > 0.0:
                 noise = torch.nn.functional.dropout(noise, p=noise_dropout)
diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py
index faf00f4..21bf1e1 100644
--- a/ldm/modules/attention.py
+++ b/ldm/modules/attention.py
@@ -144,7 +144,7 @@ class CrossAttention(nn.Module):
         inner_dim = dim_head * heads
         context_dim = default(context_dim, query_dim)
 
-        self.scale = dim_head ** -0.5
+        self.scale = dim_head**-0.5
         self.heads = heads
 
         self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
diff --git a/ldm/modules/diffusionmodules/openaimodel.py b/ldm/modules/diffusionmodules/openaimodel.py
index 21253eb..44e1f2d 100644
--- a/ldm/modules/diffusionmodules/openaimodel.py
+++ b/ldm/modules/diffusionmodules/openaimodel.py
@@ -43,7 +43,7 @@ class AttentionPool2d(nn.Module):
     ):
         super().__init__()
         self.positional_embedding = nn.Parameter(
-            th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
+            th.randn(embed_dim, spacial_dim**2 + 1) / embed_dim**0.5
         )
         self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
         self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
@@ -354,7 +354,7 @@ def count_flops_attn(model, _x, y):
     # We perform two matmuls with the same number of ops.
     # The first computes the weight matrix, the second computes
     # the combination of the value vectors.
-    matmul_ops = 2 * b * (num_spatial ** 2) * c
+    matmul_ops = 2 * b * (num_spatial**2) * c
     model.total_ops += th.DoubleTensor([matmul_ops])
 
 
diff --git a/ldm/modules/diffusionmodules/util.py b/ldm/modules/diffusionmodules/util.py
index f7cc16f..2ace948 100644
--- a/ldm/modules/diffusionmodules/util.py
+++ b/ldm/modules/diffusionmodules/util.py
@@ -25,7 +25,7 @@ def make_beta_schedule(
     if schedule == "linear":
         betas = (
             torch.linspace(
-                linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64
+                linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64
             )
             ** 2
         )
diff --git a/ldm/modules/image_degradation/bsrgan.py b/ldm/modules/image_degradation/bsrgan.py
index 346ff9b..3b48bea 100644
--- a/ldm/modules/image_degradation/bsrgan.py
+++ b/ldm/modules/image_degradation/bsrgan.py
@@ -403,7 +403,7 @@ def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
         img = img + np.random.multivariate_normal(
-            [0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]
+            [0, 0, 0], np.abs(L**2 * conv), img.shape[:2]
         ).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
@@ -427,7 +427,7 @@ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
         img += img * np.random.multivariate_normal(
-            [0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]
+            [0, 0, 0], np.abs(L**2 * conv), img.shape[:2]
         ).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
@@ -519,7 +519,6 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
         )
 
     for i in shuffle_order:
-
         if i == 0:
             img = add_blur(img, sf=sf)
 
@@ -623,7 +622,6 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
         )
 
     for i in shuffle_order:
-
         if i == 0:
             image = add_blur(image, sf=sf)
 
diff --git a/ldm/modules/image_degradation/bsrgan_light.py b/ldm/modules/image_degradation/bsrgan_light.py
index 03020db..8995bb0 100644
--- a/ldm/modules/image_degradation/bsrgan_light.py
+++ b/ldm/modules/image_degradation/bsrgan_light.py
@@ -404,7 +404,7 @@ def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
         img = img + np.random.multivariate_normal(
-            [0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]
+            [0, 0, 0], np.abs(L**2 * conv), img.shape[:2]
         ).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
@@ -428,7 +428,7 @@ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
         img += img * np.random.multivariate_normal(
-            [0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]
+            [0, 0, 0], np.abs(L**2 * conv), img.shape[:2]
         ).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
@@ -520,7 +520,6 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
         )
 
     for i in shuffle_order:
-
         if i == 0:
             img = add_blur(img, sf=sf)
 
@@ -624,7 +623,6 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None, up=False):
         )
 
     for i in shuffle_order:
-
         if i == 0:
             image = add_blur(image, sf=sf)
 
diff --git a/ldm/modules/image_degradation/utils_image.py b/ldm/modules/image_degradation/utils_image.py
index 2f95aa1..c933af5 100644
--- a/ldm/modules/image_degradation/utils_image.py
+++ b/ldm/modules/image_degradation/utils_image.py
@@ -271,22 +271,18 @@ def read_img(path):
 
 
 def uint2single(img):
-
     return np.float32(img / 255.0)
 
 
 def single2uint(img):
-
     return np.uint8((img.clip(0, 1) * 255.0).round())
 
 
 def uint162single(img):
-
     return np.float32(img / 65535.0)
 
 
 def single2uint16(img):
-
     return np.uint16((img.clip(0, 1) * 65535.0).round())
 
 
@@ -586,18 +582,14 @@ def rgb2ycbcr(img, only_y=True):
     if only_y:
         rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
     else:
-        rlt = (
-            np.matmul(
-                img,
-                [
-                    [65.481, -37.797, 112.0],
-                    [128.553, -74.203, -93.786],
-                    [24.966, 112.0, -18.214],
-                ],
-            )
-            / 255.0
-            + [16, 128, 128]
-        )
+        rlt = np.matmul(
+            img,
+            [
+                [65.481, -37.797, 112.0],
+                [128.553, -74.203, -93.786],
+                [24.966, 112.0, -18.214],
+            ],
+        ) / 255.0 + [16, 128, 128]
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
@@ -616,18 +608,14 @@ def ycbcr2rgb(img):
     if in_img_type != np.uint8:
         img *= 255.0
     # convert
-    rlt = (
-        np.matmul(
-            img,
-            [
-                [0.00456621, 0.00456621, 0.00456621],
-                [0, -0.00153632, 0.00791071],
-                [0.00625893, -0.00318811, 0],
-            ],
-        )
-        * 255.0
-        + [-222.921, 135.576, -276.836]
-    )
+    rlt = np.matmul(
+        img,
+        [
+            [0.00456621, 0.00456621, 0.00456621],
+            [0, -0.00153632, 0.00791071],
+            [0.00625893, -0.00318811, 0],
+        ],
+    ) * 255.0 + [-222.921, 135.576, -276.836]
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
@@ -650,18 +638,14 @@ def bgr2ycbcr(img, only_y=True):
     if only_y:
         rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
     else:
-        rlt = (
-            np.matmul(
-                img,
-                [
-                    [24.966, 112.0, -18.214],
-                    [128.553, -74.203, -93.786],
-                    [65.481, -37.797, 112.0],
-                ],
-            )
-            / 255.0
-            + [16, 128, 128]
-        )
+        rlt = np.matmul(
+            img,
+            [
+                [24.966, 112.0, -18.214],
+                [128.553, -74.203, -93.786],
+                [65.481, -37.797, 112.0],
+            ],
+        ) / 255.0 + [16, 128, 128]
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
@@ -752,11 +736,11 @@ def ssim(img1, img2):
 
     mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
     mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
-    mu1_sq = mu1 ** 2
-    mu2_sq = mu2 ** 2
+    mu1_sq = mu1**2
+    mu2_sq = mu2**2
     mu1_mu2 = mu1 * mu2
-    sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
-    sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
+    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
+    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
     sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
 
     ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
@@ -775,8 +759,8 @@
 # matlab 'imresize' function, now only support 'bicubic'
 def cubic(x):
     absx = torch.abs(x)
-    absx2 = absx ** 2
-    absx3 = absx ** 3
+    absx2 = absx**2
+    absx3 = absx**3
     return (1.5 * absx3 - 2.5 * absx2 + 1) * ((absx <= 1).type_as(absx)) + (
         -0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2
     ) * (((absx > 1) * (absx <= 2)).type_as(absx))
diff --git a/ldm/modules/karlo/kakao/models/clip.py b/ldm/modules/karlo/kakao/models/clip.py
index 7b8d030..8cebead 100644
--- a/ldm/modules/karlo/kakao/models/clip.py
+++ b/ldm/modules/karlo/kakao/models/clip.py
@@ -106,7 +106,7 @@ class CustomizedCLIP(CLIP):
             )
             vision_patch_size = None
             assert (
-                output_width ** 2 + 1
+                output_width**2 + 1
                 == state_dict["visual.attnpool.positional_embedding"].shape[0]
             )
             image_resolution = output_width * 32
diff --git a/ldm/modules/karlo/kakao/modules/diffusion/gaussian_diffusion.py b/ldm/modules/karlo/kakao/modules/diffusion/gaussian_diffusion.py
index 7fc5167..6a111aa 100644
--- a/ldm/modules/karlo/kakao/modules/diffusion/gaussian_diffusion.py
+++ b/ldm/modules/karlo/kakao/modules/diffusion/gaussian_diffusion.py
@@ -26,8 +26,8 @@ def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_time
     if beta_schedule == "quad":
        betas = (
             np.linspace(
-                beta_start ** 0.5,
-                beta_end ** 0.5,
+                beta_start**0.5,
+                beta_end**0.5,
                 num_diffusion_timesteps,
                 dtype=np.float64,
             )
@@ -681,7 +681,7 @@ class GaussianDiffusion(th.nn.Module):
         noise = th.randn_like(x)
         mean_pred = (
             out["pred_xstart"] * th.sqrt(alpha_bar_prev)
-            + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
+            + th.sqrt(1 - alpha_bar_prev - sigma**2) * eps
         )
         nonzero_mask = (
             (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
diff --git a/ldm/modules/karlo/kakao/sampler.py b/ldm/modules/karlo/kakao/sampler.py
index f5453bd..f9355be 100644
--- a/ldm/modules/karlo/kakao/sampler.py
+++ b/ldm/modules/karlo/kakao/sampler.py
@@ -36,7 +36,6 @@ class T2ISampler(BaseSampler):
         clip_stat_path: str,
         sampling_type: str = "default",
     ):
-
         model = cls(
             root_dir=root_dir,
             sampling_type=sampling_type,
diff --git a/ldm/modules/midas/midas/dpt_depth.py b/ldm/modules/midas/midas/dpt_depth.py
index 526db6b..d877bfd 100644
--- a/ldm/modules/midas/midas/dpt_depth.py
+++ b/ldm/modules/midas/midas/dpt_depth.py
@@ -33,7 +33,6 @@ class DPT(BaseModel):
         channels_last=False,
         use_bn=False,
     ):
-
         super(DPT, self).__init__()
 
         self.channels_last = channels_last
diff --git a/ldm/modules/midas/utils.py b/ldm/modules/midas/utils.py
index d327b48..47956b7 100644
--- a/ldm/modules/midas/utils.py
+++ b/ldm/modules/midas/utils.py
@@ -17,7 +17,6 @@ def read_pfm(path):
         tuple: (data, scale)
     """
     with open(path, "rb") as file:
-
         color = None
         width = None
         height = None
diff --git a/requirements.txt b/requirements.txt
index 7073dfd..eae4d18 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -16,7 +16,7 @@ gradio==3.13.2
 kornia==0.6
 invisible-watermark>=0.1.5
 streamlit-drawable-canvas==0.8.0
-black==21.9b0
+black==23.3.0
 isort==5.9.3
 flake8==4.0.1
 click==8.0.3