
Commit 1997066

[pre-commit.ci] pre-commit suggestions (#375)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Jirka Borovec <6035284+Borda@users.noreply.github.com>
1 parent 07911b9 commit 1997066

9 files changed: 21 additions & 27 deletions


.pre-commit-config.yaml

Lines changed: 3 additions & 3 deletions

@@ -9,7 +9,7 @@ ci:
 
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.6.0
+    rev: v5.0.0
     hooks:
       - id: end-of-file-fixer
       - id: trailing-whitespace
@@ -45,7 +45,7 @@ repos:
         args: ["--print-width=120"]
 
   - repo: https://github.com/executablebooks/mdformat
-    rev: 0.7.17
+    rev: 0.7.21
     hooks:
       - id: mdformat
         additional_dependencies:
@@ -55,7 +55,7 @@ repos:
         args: ["--number"]
 
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.5.0
+    rev: v0.8.6
    hooks:
      # try to fix what is possible
      - id: ruff
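
The three `rev:` bumps above pin newer versions of the hooks that pre-commit runs. The jump of `ruff-pre-commit` to v0.8.6 is the one most visible in the rest of this commit: the notebook edits below replace printf-style `%` interpolation with f-strings — the rewrite Ruff's pyupgrade rule UP031 (printf-string-formatting) flags — presumably to keep the updated hook passing. A minimal sketch of that rewrite, using a made-up variable name; the diffs below apply the same pattern:

```python
# Hypothetical example of the rewrite Ruff's UP031 rule suggests:
# printf-style "%" interpolation replaced by an equivalent f-string.
step = 7

old_style = "generation_%i" % step  # flagged by UP031 (printf-string-formatting)
new_style = f"generation_{step}"    # the f-string form used after this commit

assert old_style == new_style == "generation_7"
```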

README.md

Lines changed: 2 additions & 2 deletions

@@ -44,13 +44,13 @@ The addition has to formed as new folder:
   accelerator:
     - CPU
   ```
-- _\[optional\]_ requirements listed in `requirements.txt` in the particular folder (in case you need some other packaged then listed the parent folder)
+- _[optional]_ requirements listed in `requirements.txt` in the particular folder (in case you need some other packaged then listed the parent folder)
 
 ## Using datasets
 
 It is quite common to use some public or competition's dataset for your example.
 We facilitate this via defining the data sources in the metafile.
-There are two basic options, download a file from web or pul Kaggle dataset _\[Experimental\]_:
+There are two basic options, download a file from web or pul Kaggle dataset _[Experimental]_:
 
 ```yaml
 datasets:

course_UvA-DL/03-initialization-and-optimization/notebook.py

Lines changed: 1 addition & 1 deletion

@@ -225,7 +225,7 @@ def plot_dists(val_dict, color="C0", xlabel=None, stat="count", use_kde=True):
             kde=use_kde and ((val_dict[key].max() - val_dict[key].min()) > 1e-8),
         )  # Only plot kde if there is variance
         hidden_dim_str = (
-            r"(%i $\to$ %i)" % (val_dict[key].shape[1], val_dict[key].shape[0]) if len(val_dict[key].shape) > 1 else ""
+            r"(%i $\to$ %i)" % (val_dict[key].shape[1], val_dict[key].shape[0]) if len(val_dict[key].shape) > 1 else ""  # noqa: UP031
         )
         key_ax.set_title(f"{key} {hidden_dim_str}")
         if xlabel is not None:
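
This is the one place where `%` formatting is kept: instead of converting the raw string with the literal LaTeX `$\to$` to an f-string, the commit appends `# noqa: UP031`, the standard Ruff/flake8 way to silence a single rule on a single line. A small self-contained sketch of the same construct, with an assumed NumPy array standing in for `val_dict[key]`:

```python
# Sketch with an assumed array shape; mirrors the suppressed line above.
import numpy as np

weights = np.zeros((128, 64))  # hypothetical (out_features, in_features) matrix

# The raw string keeps the literal LaTeX "$\to$"; the trailing
# "# noqa: UP031" tells Ruff to skip its printf-string-formatting rule here.
hidden_dim_str = (
    r"(%i $\to$ %i)" % (weights.shape[1], weights.shape[0]) if len(weights.shape) > 1 else ""  # noqa: UP031
)
print(hidden_dim_str)  # -> (64 $\to$ 128)
```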

course_UvA-DL/05-transformers-and-MH-attention/MHAttention.py

Lines changed: 4 additions & 4 deletions

@@ -633,8 +633,8 @@ def forward(self, x):
 fig, ax = plt.subplots(2, 2, figsize=(12, 4))
 ax = [a for a_list in ax for a in a_list]
 for i in range(len(ax)):
-    ax[i].plot(np.arange(1, 17), pe[i, :16], color="C%i" % i, marker="o", markersize=6, markeredgecolor="black")
-    ax[i].set_title("Encoding in hidden dimension %i" % (i + 1))
+    ax[i].plot(np.arange(1, 17), pe[i, :16], color=f"C{i}", marker="o", markersize=6, markeredgecolor="black")
+    ax[i].set_title(f"Encoding in hidden dimension {i + 1}")
     ax[i].set_xlabel("Position in sequence", fontsize=10)
     ax[i].set_ylabel("Positional encoding", fontsize=10)
     ax[i].set_xticks(np.arange(1, 17))
@@ -1088,7 +1088,7 @@ def plot_attention_maps(input_data, attn_maps, idx=0):
             ax[row][column].set_xticklabels(input_data.tolist())
             ax[row][column].set_yticks(list(range(seq_len)))
             ax[row][column].set_yticklabels(input_data.tolist())
-            ax[row][column].set_title("Layer %i, Head %i" % (row + 1, column + 1))
+            ax[row][column].set_title(f"Layer {row + 1}, Head {column + 1}")
     fig.subplots_adjust(hspace=0.5)
     plt.show()
 
@@ -1590,7 +1590,7 @@ def visualize_prediction(idx):
 visualize_prediction(mistakes[-1])
 print("Probabilities:")
 for i, p in enumerate(preds[mistakes[-1]].cpu().numpy()):
-    print("Image %i: %4.2f%%" % (i, 100.0 * p))
+    print(f"Image {i}: {100.0 * p:4.2f}%")
 
 # %% [markdown]
 # In this example, the model confuses a palm tree with a building, giving a probability of ~90% to image 2, and 8% to the actual anomaly.
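
The last hunk also shows how printf format specs carry over to f-strings: `%i` becomes a plain `{i}`, `%4.2f` becomes `{...:4.2f}`, and the doubled `%%` (a literal percent sign in printf-style formatting) becomes a single `%`. A quick check with a made-up probability value:

```python
# Made-up index and probability, only to show the two formats agree.
i, p = 2, 0.875

old_style = "Image %i: %4.2f%%" % (i, 100.0 * p)  # "%%" is a literal "%"
new_style = f"Image {i}: {100.0 * p:4.2f}%"       # "%4.2f" maps to ":4.2f"

assert old_style == new_style == "Image 2: 87.50%"
```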

course_UvA-DL/07-deep-energy-based-generative-models/notebook.py

Lines changed: 1 addition & 1 deletion

@@ -570,7 +570,7 @@ def on_epoch_end(self, trainer, pl_module):
                 grid = torchvision.utils.make_grid(
                     imgs_to_plot, nrow=imgs_to_plot.shape[0], normalize=True, value_range=(-1, 1)
                 )
-                trainer.logger.experiment.add_image("generation_%i" % i, grid, global_step=trainer.current_epoch)
+                trainer.logger.experiment.add_image(f"generation_{i}", grid, global_step=trainer.current_epoch)
 
     def generate_imgs(self, pl_module):
         pl_module.eval()

course_UvA-DL/08-deep-autoencoders/notebook.py

Lines changed: 3 additions & 3 deletions

@@ -388,7 +388,7 @@ def on_train_epoch_end(self, trainer, pl_module):
 def train_cifar(latent_dim):
     # Create a PyTorch Lightning trainer with the generation callback
     trainer = pl.Trainer(
-        default_root_dir=os.path.join(CHECKPOINT_PATH, "cifar10_%i" % latent_dim),
+        default_root_dir=os.path.join(CHECKPOINT_PATH, f"cifar10_{latent_dim}"),
         accelerator="auto",
         devices=1,
         max_epochs=500,
@@ -402,7 +402,7 @@ def train_cifar(latent_dim):
     trainer.logger._default_hp_metric = None  # Optional logging argument that we don't need
 
     # Check whether pretrained model exists. If yes, load it and skip training
-    pretrained_filename = os.path.join(CHECKPOINT_PATH, "cifar10_%i.ckpt" % latent_dim)
+    pretrained_filename = os.path.join(CHECKPOINT_PATH, f"cifar10_{latent_dim}.ckpt")
     if os.path.isfile(pretrained_filename):
         print("Found pretrained model, loading...")
         model = Autoencoder.load_from_checkpoint(pretrained_filename)
@@ -475,7 +475,7 @@ def visualize_reconstructions(model, input_imgs):
     grid = torchvision.utils.make_grid(imgs, nrow=4, normalize=True, value_range=(-1, 1))
     grid = grid.permute(1, 2, 0)
     plt.figure(figsize=(7, 4.5))
-    plt.title("Reconstructed from %i latents" % (model.hparams.latent_dim))
+    plt.title(f"Reconstructed from {model.hparams.latent_dim} latents")
     plt.imshow(grid)
     plt.axis("off")
     plt.show()

course_UvA-DL/09-normalizing-flows/NF.py

Lines changed: 2 additions & 2 deletions

@@ -512,7 +512,7 @@ def visualize_dequantization(quants, prior=None):
     x_ticks = []
     for v in np.unique(out):
         indices = np.where(out == v)
-        color = to_rgb("C%i" % v)
+        color = to_rgb(f"C{v}")
         plt.fill_between(inp[indices], prob[indices], np.zeros(indices[0].shape[0]), color=color + (0.5,), label=str(v))
         plt.plot([inp[indices[0][0]]] * 2, [0, prob[indices[0][0]]], color=color)
         plt.plot([inp[indices[0][-1]]] * 2, [0, prob[indices[0][-1]]], color=color)
@@ -525,7 +525,7 @@ def visualize_dequantization(quants, prior=None):
     plt.xlim(inp.min(), inp.max())
     plt.xlabel("z")
     plt.ylabel("Probability")
-    plt.title("Dequantization distribution for %i discrete values" % quants)
+    plt.title(f"Dequantization distribution for {quants} discrete values")
     plt.legend()
     plt.show()
     plt.close()
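
For context on the `to_rgb(f"C{v}")` change: strings of the form `"C0"`, `"C1"`, ... are Matplotlib's shorthand for colors from the active property cycle, and `matplotlib.colors.to_rgb` resolves them to RGB tuples, so the f-string simply picks a distinct cycle color per discrete value `v`. A tiny standalone check (output values depend on the configured style):

```python
# Standalone sketch: resolve property-cycle color names "C0", "C1", ... to RGB.
from matplotlib.colors import to_rgb

for v in range(3):
    print(v, to_rgb(f"C{v}"))  # RGB tuples from the current color cycle
```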

course_UvA-DL/10-autoregressive-image-modeling/notebook.py

Lines changed: 1 addition & 1 deletion

@@ -403,7 +403,7 @@ def show_center_recep_field(img, out):
 for l_idx in range(4):
     vert_img = vert_conv(vert_img)
     horiz_img = horiz_conv(horiz_img) + vert_img
-    print("Layer %i" % (l_idx + 2))
+    print(f"Layer {l_idx + 2}")
     show_center_recep_field(inp_img, horiz_img)
 
 # %% [markdown]

course_UvA-DL/12-meta-learning/notebook.py

Lines changed: 4 additions & 10 deletions

@@ -703,10 +703,7 @@ def test_proto_net(model, dataset, data_feats=None, k_shot=4):
 data_feats = None
 for k in [2, 4, 8, 16, 32]:
     protonet_accuracies[k], data_feats = test_proto_net(protonet_model, test_set, data_feats=data_feats, k_shot=k)
-    print(
-        "Accuracy for k=%i: %4.2f%% (+-%4.2f%%)"
-        % (k, 100.0 * protonet_accuracies[k][0], 100 * protonet_accuracies[k][1])
-    )
+    print(f"Accuracy for k={k}: {100.0 * protonet_accuracies[k][0]:4.2f}% (+-{100 * protonet_accuracies[k][1]:4.2f}%)")
 
 # %% [markdown]
 # Before discussing the results above, let's first plot the accuracies over number of examples in the support set:
@@ -1174,8 +1171,7 @@ def test_protomaml(model, dataset, k_shot=4):
 
 for k in protomaml_accuracies:
     print(
-        "Accuracy for k=%i: %4.2f%% (+-%4.2f%%)"
-        % (k, 100.0 * protomaml_accuracies[k][0], 100.0 * protomaml_accuracies[k][1])
+        f"Accuracy for k={k}: {100.0 * protomaml_accuracies[k][0]:4.2f}% (+-{100.0 * protomaml_accuracies[k][1]:4.2f}%)"
     )
 
 # %% [markdown]
@@ -1267,8 +1263,7 @@ def test_protomaml(model, dataset, k_shot=4):
         protonet_model, svhn_fewshot_dataset, data_feats=data_feats, k_shot=k
     )
     print(
-        "Accuracy for k=%i: %4.2f%% (+-%4.2f%%)"
-        % (k, 100.0 * protonet_svhn_accuracies[k][0], 100 * protonet_svhn_accuracies[k][1])
+        f"Accuracy for k={k}: {100.0 * protonet_svhn_accuracies[k][0]:4.2f}% (+-{100 * protonet_svhn_accuracies[k][1]:4.2f}%)"
     )
 
 # %% [markdown]
@@ -1295,8 +1290,7 @@ def test_protomaml(model, dataset, k_shot=4):
 
 for k in protomaml_svhn_accuracies:
     print(
-        "Accuracy for k=%i: %4.2f%% (+-%4.2f%%)"
-        % (k, 100.0 * protomaml_svhn_accuracies[k][0], 100.0 * protomaml_svhn_accuracies[k][1])
+        f"Accuracy for k={k}: {100.0 * protomaml_svhn_accuracies[k][0]:4.2f}% (+-{100.0 * protomaml_svhn_accuracies[k][1]:4.2f}%)"
     )
 
 # %% [markdown]
