Commit 19f4497

Revert "add blurs"
This reverts commit e33f30e.
1 parent: e33f30e

File tree: 2 files changed (+5, -24 lines)

setup.py
stylegan2_pytorch/stylegan2_pytorch.py


setup.py

Lines changed: 3 additions & 4 deletions
@@ -8,7 +8,7 @@
       'stylegan2_pytorch = stylegan2_pytorch.cli:main',
     ],
   },
-  version = '1.0.3',
+  version = '1.0.2',
   license='GPLv3+',
   description = 'StyleGan2 in Pytorch',
   author = 'Phil Wang',
@@ -17,16 +17,15 @@
   download_url = 'https://github.com/lucidrains/stylegan2-pytorch/archive/v_036.tar.gz',
   keywords = ['generative adversarial networks', 'artificial intelligence'],
   install_requires=[
-    'contrastive_learner>=0.1.0',
     'fire',
-    'kornia',
-    'linear_attention_transformer',
     'numpy',
     'retry',
     'tqdm',
     'torch',
     'torchvision',
     'pillow',
+    'contrastive_learner>=0.1.0',
+    'linear_attention_transformer',
     'vector-quantize-pytorch'
   ],
   classifiers=[

stylegan2_pytorch/stylegan2_pytorch.py

Lines changed: 2 additions & 20 deletions
@@ -23,8 +23,6 @@
 from torch.utils.data.distributed import DistributedSampler
 from torch.nn.parallel import DistributedDataParallel as DDP

-from kornia.filters import filter2D
-
 import torchvision
 from torchvision import transforms
 from stylegan2_pytorch.diff_augment import DiffAugment
@@ -103,16 +101,6 @@ def forward(self, x):
         out = out.permute(0, 3, 1, 2)
         return out, loss

-class Blur(nn.Module):
-    def __init__(self):
-        super().__init__()
-        f = torch.Tensor([1, 2, 1])
-        self.register_buffer('f', f)
-    def forward(self, x):
-        f = self.f
-        f = f[None, None, :] * f[None, :, None]
-        return filter2D(x, f, normalized=True)
-
 # one layer of self-attention and feedforward, for images

 attn_and_ff = lambda chan: nn.Sequential(*[
@@ -364,10 +352,7 @@ def __init__(self, latent_dim, input_channel, upsample, rgba = False):
         out_filters = 3 if not rgba else 4
         self.conv = Conv2DMod(input_channel, out_filters, 1, demod=False)

-        self.upsample = nn.Sequential(
-            nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False),
-            Blur()
-        ) if upsample else None
+        self.upsample = nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False) if upsample else None

     def forward(self, x, prev_rgb, istyle):
         b, c, h, w = x.shape
@@ -465,10 +450,7 @@ def __init__(self, input_channels, filters, downsample=True):
             leaky_relu()
         )

-        self.downsample = nn.Sequential(
-            Blur(),
-            nn.Conv2d(filters, filters, 3, padding = 1, stride = 2)
-        ) if downsample else None
+        self.downsample = nn.Conv2d(filters, filters, 3, padding = 1, stride = 2) if downsample else None

     def forward(self, x):
         res = self.conv_res(x)
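
For context, the Blur module removed by this revert applied a normalized 3x3 binomial filter (the outer product of [1, 2, 1] with itself) after bilinear upsampling in ToRGB and before the strided downsampling convolution in the discriminator block. Below is a minimal, dependency-free sketch of that same filtering using plain torch.nn.functional.conv2d instead of kornia's filter2D; the class name BinomialBlur and the explicit normalization step are illustrative assumptions, not code from the repository.

import torch
import torch.nn as nn
import torch.nn.functional as F

class BinomialBlur(nn.Module):
    # Illustrative stand-in for the reverted Blur module, without the kornia dependency.
    def __init__(self):
        super().__init__()
        f = torch.tensor([1., 2., 1.])
        kernel = f[None, :] * f[:, None]   # 3x3 binomial kernel
        kernel = kernel / kernel.sum()     # same effect as filter2D(..., normalized=True)
        self.register_buffer('kernel', kernel)

    def forward(self, x):
        b, c, h, w = x.shape
        k = self.kernel.repeat(c, 1, 1, 1)            # one depthwise kernel per channel
        return F.conv2d(x, k, padding=1, groups=c)    # preserves spatial size

# usage sketch: blur = BinomialBlur(); y = blur(torch.randn(1, 3, 64, 64))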
