Commit ac60e69

Circle CI authored and committed
CircleCI update of dev docs (3367).
1 parent 62d8152 commit ac60e69

366 files changed: +93802 −93663 lines changed


master/_downloads/19cb597692ceab77fc7a30538a6598cb/plot_ot_batch.ipynb

Lines changed: 4 additions & 4 deletions
@@ -33,7 +33,7 @@
 },
 "outputs": [],
 "source": [
- "import ot\nimport numpy as np\n\nn_problems = 4 # nb problems/batch size\nn_samples = 8 # nb samples\ndim = 2 # nb dimensions\n\nnp.random.seed(0)\nsamples_source = np.random.randn(n_problems, n_samples, dim)\nsamples_target = samples_source + 0.1 * np.random.randn(n_problems, n_samples, dim)\n\n# Naive approach\nM_list = []\nfor i in range(n_problems):\n M_list.append(\n ot.dist(samples_source[i], samples_target[i])\n ) # List of cost matrices n_samples x n_samples\n# Batched approach\nM_batch = ot.batch.dist_batch(\n samples_source, samples_target\n) # Array of cost matrices n_problems x n_samples x n_samples\n\nfor i in range(n_problems):\n assert np.allclose(M_list[i], M_batch[i])"
+ "import ot\nimport numpy as np\n\nn_problems = 4 # nb problems/batch size\nn_samples = 8 # nb samples\ndim = 2 # nb dimensions\n\nnp.random.seed(0)\nsamples_source = np.random.randn(n_problems, n_samples, dim)\nsamples_target = samples_source + 0.1 * np.random.randn(n_problems, n_samples, dim)\n\n# Naive approach\nM_list = []\nfor i in range(n_problems):\n M_list.append(\n ot.dist(samples_source[i], samples_target[i])\n ) # List of cost matrices n_samples x n_samples\n# Batched approach\nM_batch = ot.dist_batch(\n samples_source, samples_target\n) # Array of cost matrices n_problems x n_samples x n_samples\n\nfor i in range(n_problems):\n assert np.allclose(M_list[i], M_batch[i])"
 ]
 },
 {
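Net effect of this hunk: the batched cost-matrix helper moves from ot.batch.dist_batch to the top-level ot.dist_batch. A minimal sketch of the updated call, assuming the top-level re-export shown in the diff (variable names xs and xt are illustrative, sizes match the notebook cell):

    import ot
    import numpy as np

    np.random.seed(0)
    xs = np.random.randn(4, 8, 2)             # 4 problems, 8 samples, 2 dimensions
    xt = xs + 0.1 * np.random.randn(4, 8, 2)

    # Previous docs build used: M = ot.batch.dist_batch(xs, xt)
    M = ot.dist_batch(xs, xt)                 # one cost matrix per problem, shape (4, 8, 8)
    assert M.shape == (4, 8, 8)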
@@ -51,7 +51,7 @@
 },
 "outputs": [],
 "source": [
- "reg = 1.0\nmax_iter = 100\ntol = 1e-3\n\n# Naive approach\nresults_values_list = []\nfor i in range(n_problems):\n res = ot.solve(M_list[i], reg=reg, max_iter=max_iter, tol=tol, reg_type=\"entropy\")\n results_values_list.append(res.value_linear)\n\n# Batched approach\nresults_batch = ot.batch.solve_batch(\n M=M_batch, reg=reg, max_iter=max_iter, tol=tol, reg_type=\"entropy\"\n)\nresults_values_batch = results_batch.value_linear\n\nassert np.allclose(np.array(results_values_list), results_values_batch, atol=tol * 10)"
+ "reg = 1.0\nmax_iter = 100\ntol = 1e-3\n\n# Naive approach\nresults_values_list = []\nfor i in range(n_problems):\n res = ot.solve(M_list[i], reg=reg, max_iter=max_iter, tol=tol, reg_type=\"entropy\")\n results_values_list.append(res.value_linear)\n\n# Batched approach\nresults_batch = ot.solve_batch(\n M=M_batch, reg=reg, max_iter=max_iter, tol=tol, reg_type=\"entropy\"\n)\nresults_values_batch = results_batch.value_linear\n\nassert np.allclose(np.array(results_values_list), results_values_batch, atol=tol * 10)"
 ]
 },
 {
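The entropic solver hunk follows the same pattern: ot.batch.solve_batch becomes the top-level ot.solve_batch. A compact sketch under the same assumption, with an illustrative setup line; parameter names and the value_linear attribute match the notebook cell:

    import ot
    import numpy as np

    M = ot.dist_batch(np.random.randn(4, 8, 2), np.random.randn(4, 8, 2))  # batched cost matrices

    # Previous docs build used: ot.batch.solve_batch(...)
    res_batch = ot.solve_batch(M=M, reg=1.0, max_iter=100, tol=1e-3, reg_type="entropy")
    values = res_batch.value_linear           # one linear OT value per problem in the batch

    # Loop-based equivalent for a single problem i:
    # ot.solve(M[i], reg=1.0, max_iter=100, tol=1e-3, reg_type="entropy").value_linear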
@@ -69,7 +69,7 @@
 },
 "outputs": [],
 "source": [
- "from time import perf_counter\n\nn_problems = 128\nn_samples = 8\ndim = 2\nreg = 10.0\nmax_iter = 1000\ntol = 1e-3\n\nsamples_source = np.random.randn(n_problems, n_samples, dim)\nsamples_target = samples_source + 0.1 * np.random.randn(n_problems, n_samples, dim)\n\n\ndef benchmark_naive(samples_source, samples_target):\n start = perf_counter()\n for i in range(n_problems):\n M = ot.dist(samples_source[i], samples_target[i])\n res = ot.solve(M, reg=reg, max_iter=max_iter, tol=tol, reg_type=\"entropy\")\n end = perf_counter()\n return end - start\n\n\ndef benchmark_batch(samples_source, samples_target):\n start = perf_counter()\n M_batch = ot.batch.dist_batch(samples_source, samples_target)\n res_batch = ot.batch.solve_batch(\n M=M_batch, reg=reg, max_iter=max_iter, tol=tol, reg_type=\"entropy\"\n )\n end = perf_counter()\n return end - start\n\n\ntime_naive = benchmark_naive(samples_source, samples_target)\ntime_batch = benchmark_batch(samples_source, samples_target)\n\nprint(f\"Naive approach time: {time_naive:.4f} seconds\")\nprint(f\"Batched approach time: {time_batch:.4f} seconds\")"
+ "from time import perf_counter\n\nn_problems = 128\nn_samples = 8\ndim = 2\nreg = 10.0\nmax_iter = 1000\ntol = 1e-3\n\nsamples_source = np.random.randn(n_problems, n_samples, dim)\nsamples_target = samples_source + 0.1 * np.random.randn(n_problems, n_samples, dim)\n\n\ndef benchmark_naive(samples_source, samples_target):\n start = perf_counter()\n for i in range(n_problems):\n M = ot.dist(samples_source[i], samples_target[i])\n res = ot.solve(M, reg=reg, max_iter=max_iter, tol=tol, reg_type=\"entropy\")\n end = perf_counter()\n return end - start\n\n\ndef benchmark_batch(samples_source, samples_target):\n start = perf_counter()\n M_batch = ot.dist_batch(samples_source, samples_target)\n res_batch = ot.solve_batch(\n M=M_batch, reg=reg, max_iter=max_iter, tol=tol, reg_type=\"entropy\"\n )\n end = perf_counter()\n return end - start\n\n\ntime_naive = benchmark_naive(samples_source, samples_target)\ntime_batch = benchmark_batch(samples_source, samples_target)\n\nprint(f\"Naive approach time: {time_naive:.4f} seconds\")\nprint(f\"Batched approach time: {time_batch:.4f} seconds\")"
 ]
 },
 {
@@ -87,7 +87,7 @@
 },
 "outputs": [],
 "source": [
- "from ot import solve_gromov\nfrom ot.batch import solve_gromov_batch\n\n\ndef benchmark_naive_gw(samples_source, samples_target):\n start = perf_counter()\n avg_value = 0\n for i in range(n_problems):\n C1 = ot.dist(samples_source[i], samples_source[i])\n C2 = ot.dist(samples_target[i], samples_target[i])\n res = solve_gromov(C1, C2, max_iter=1000, tol=tol)\n avg_value += res.value\n avg_value /= n_problems\n end = perf_counter()\n return end - start, avg_value\n\n\ndef benchmark_batch_gw(samples_source, samples_target):\n start = perf_counter()\n C1_batch = ot.batch.dist_batch(samples_source, samples_source)\n C2_batch = ot.batch.dist_batch(samples_target, samples_target)\n res_batch = solve_gromov_batch(\n C1_batch, C2_batch, reg=1, max_iter=100, max_iter_inner=50, tol=tol\n )\n avg_value = np.mean(res_batch.value)\n end = perf_counter()\n return end - start, avg_value\n\n\ntime_naive_gw, avg_value_naive_gw = benchmark_naive_gw(samples_source, samples_target)\ntime_batch_gw, avg_value_batch_gw = benchmark_batch_gw(samples_source, samples_target)\n\nprint(f\"{'Method':<20}{'Time (s)':<15}{'Avg Value':<15}\")\nprint(f\"{'Naive GW':<20}{time_naive_gw:<15.4f}{avg_value_naive_gw:<15.4f}\")\nprint(f\"{'Batched GW':<20}{time_batch_gw:<15.4f}{avg_value_batch_gw:<15.4f}\")"
+ "from ot import solve_gromov, solve_gromov_batch\n\n\ndef benchmark_naive_gw(samples_source, samples_target):\n start = perf_counter()\n avg_value = 0\n for i in range(n_problems):\n C1 = ot.dist(samples_source[i], samples_source[i])\n C2 = ot.dist(samples_target[i], samples_target[i])\n res = solve_gromov(C1, C2, max_iter=1000, tol=tol)\n avg_value += res.value\n avg_value /= n_problems\n end = perf_counter()\n return end - start, avg_value\n\n\ndef benchmark_batch_gw(samples_source, samples_target):\n start = perf_counter()\n C1_batch = ot.dist_batch(samples_source, samples_source)\n C2_batch = ot.dist_batch(samples_target, samples_target)\n res_batch = solve_gromov_batch(\n C1_batch, C2_batch, reg=1, max_iter=100, max_iter_inner=50, tol=tol\n )\n avg_value = np.mean(res_batch.value)\n end = perf_counter()\n return end - start, avg_value\n\n\ntime_naive_gw, avg_value_naive_gw = benchmark_naive_gw(samples_source, samples_target)\ntime_batch_gw, avg_value_batch_gw = benchmark_batch_gw(samples_source, samples_target)\n\nprint(f\"{'Method':<20}{'Time (s)':<15}{'Avg Value':<15}\")\nprint(f\"{'Naive GW':<20}{time_naive_gw:<15.4f}{avg_value_naive_gw:<15.4f}\")\nprint(f\"{'Batched GW':<20}{time_batch_gw:<15.4f}{avg_value_batch_gw:<15.4f}\")"
 ]
 },
 {
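The Gromov-Wasserstein hunk updates only the import path: solve_gromov_batch now comes from ot rather than ot.batch. A short sketch under that assumption, with illustrative inputs xs and xt; solver parameters mirror the notebook cell:

    import ot
    import numpy as np
    from ot import solve_gromov, solve_gromov_batch   # previously: from ot.batch import solve_gromov_batch

    xs = np.random.randn(4, 8, 2)
    xt = xs + 0.1 * np.random.randn(4, 8, 2)

    C1 = ot.dist_batch(xs, xs)                # intra-domain cost matrices, shape (4, 8, 8)
    C2 = ot.dist_batch(xt, xt)
    res = solve_gromov_batch(C1, C2, reg=1, max_iter=100, max_iter_inner=50, tol=1e-3)
    # res.value holds one entropic GW value per problem; the loop-based counterpart
    # calls solve_gromov(C1[i], C2[i], max_iter=1000, tol=1e-3) once per problem.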
