I think you can do what you want with scipy.ndimage.map_coordinates:
import numpy as np
import scipy.ndimage

def resize_batch(image_batch, new_width, new_height):
    image_batch = np.asarray(image_batch)
    shape = list(image_batch.shape)
    shape[1] = new_width
    shape[2] = new_height
    # Build an index grid for the output shape and rescale the two spatial axes
    # back into the input's coordinate range ("align corners" convention).
    ind = np.indices(shape, dtype=float)
    ind[1] *= (image_batch.shape[1] - 1) / float(new_width - 1)
    ind[2] *= (image_batch.shape[2] - 1) / float(new_height - 1)
    # order=1 gives linear interpolation along the sampled axes.
    return scipy.ndimage.map_coordinates(image_batch, ind, order=1)
print(resize_batch(np.zeros([10, 20, 30]), 60, 15).shape)
# (10, 60, 15)
print(resize_batch(np.zeros([10, 20, 30, 3]), 60, 15).shape)
# (10, 60, 15, 3)
EDIT:
Here are a couple of other versions. This one uses only NumPy operations, without SciPy, computing the bilinear interpolation "by hand":
import numpy as np

def resize_batch_np(image_batch, new_width, new_height):
    dtype = image_batch.dtype
    n, width, height = image_batch.shape[:3]
    extra_dims = image_batch.ndim - 3
    # Sampling coordinates in the input image ("align corners" convention).
    w = np.linspace(0, width - 1, new_width, dtype=dtype)[:, np.newaxis]
    h = np.linspace(0, height - 1, new_height, dtype=dtype)
    nn = np.arange(n, dtype=np.int32)[:, np.newaxis, np.newaxis]
    # Integer neighbours and fractional weights along the width axis.
    ii_1 = w.astype(np.int32)
    ii_2 = (ii_1 + 1).clip(max=width - 1)
    w_alpha = w - ii_1
    w_alpha = w_alpha.reshape(w_alpha.shape + (1,) * extra_dims)
    w_alpha_1 = 1 - w_alpha
    # Integer neighbours and fractional weights along the height axis.
    jj_1 = h.astype(np.int32)
    jj_2 = (jj_1 + 1).clip(max=height - 1)
    h_alpha = h - jj_1
    h_alpha = h_alpha.reshape(h_alpha.shape + (1,) * extra_dims)
    h_alpha_1 = 1 - h_alpha
    # Gather the four neighbouring pixels and blend them bilinearly.
    out_11 = image_batch[nn, ii_1, jj_1]
    out_12 = image_batch[nn, ii_1, jj_2]
    out_21 = image_batch[nn, ii_2, jj_1]
    out_22 = image_batch[nn, ii_2, jj_2]
    return ((out_11 * h_alpha_1 + out_12 * h_alpha) * w_alpha_1 +
            (out_21 * h_alpha_1 + out_22 * h_alpha) * w_alpha)
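A quick shape check, analogous to the one above (the inputs are just dummy arrays; note that the function assumes a floating-point batch, since the linspace grids inherit image_batch.dtype):

print(resize_batch_np(np.zeros([10, 20, 30]), 60, 15).shape)
# (10, 60, 15)
print(resize_batch_np(np.zeros([10, 20, 30, 3]), 60, 15).shape)
# (10, 60, 15, 3)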
And this other one does the same, but with Numba:
import numpy as np
import numba as nb

@nb.njit(parallel=True)
def resize_batch_nb(image_batch, new_width, new_height):
    dtype = image_batch.dtype
    n, width, height = image_batch.shape[:3]
    extra_dims = image_batch.ndim - 3
    # Sampling coordinates in the input image ("align corners" convention).
    w = np.empty(new_width, dtype=dtype)
    for i in range(new_width):
        w[i] = (width - 1) * i / (new_width - 1)
    h = np.empty(new_height, dtype=dtype)
    for i in range(new_height):
        h[i] = (height - 1) * i / (new_height - 1)
    # Integer neighbours and fractional weights along each spatial axis.
    ii_1 = w.astype(np.int32)
    ii_2 = np.minimum(ii_1 + 1, width - 1)
    w_alpha = w - ii_1
    w_alpha_1 = 1 - w_alpha
    jj_1 = h.astype(np.int32)
    jj_2 = np.minimum(jj_1 + 1, height - 1)
    h_alpha = h - jj_1
    h_alpha_1 = 1 - h_alpha
    out = np.empty((n, new_width, new_height) + image_batch.shape[3:], dtype=dtype)
    # Parallel loops over the batch and the output pixels; each output pixel
    # blends its four neighbours bilinearly.
    for idx in nb.prange(n):
        for i in nb.prange(new_width):
            for j in nb.prange(new_height):
                out_11 = image_batch[idx, ii_1[i], jj_1[j]]
                out_12 = image_batch[idx, ii_1[i], jj_2[j]]
                out_21 = image_batch[idx, ii_2[i], jj_1[j]]
                out_22 = image_batch[idx, ii_2[i], jj_2[j]]
                out_1 = out_11 * h_alpha_1[j] + out_12 * h_alpha[j]
                out_2 = out_21 * h_alpha_1[j] + out_22 * h_alpha[j]
                out[idx, i, j] = out_1 * w_alpha_1[i] + out_2 * w_alpha[i]
    return out
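One caveat: @nb.njit compiles the function on its first call for each new argument signature, so the first invocation is much slower than the steady-state timings shown further down. A throwaway warm-up call (with arbitrary dummy shapes, as in this sketch) keeps compilation out of any benchmark; in the comparison below, the call used for the allclose check already plays that role:

# Warm-up so that Numba's JIT compilation does not show up in later timings.
_ = resize_batch_nb(np.zeros([2, 4, 4, 3]), 3, 3)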
The result is the same as before:
import numpy as np
np.random.seed(100)
image_batch = np.random.rand(100, 200, 300, 3).astype(float)
new_width = 60
new_height = 80
out = resize_batch(image_batch, new_width, new_height)
out_np = resize_batch_np(image_batch, new_width, new_height)
out_nb = resize_batch_nb(image_batch, new_width, new_height)
print(np.allclose(out, out_np))
# True
print(np.allclose(out, out_nb))
# True
But the performance improves considerably:
%timeit resize_batch(image_batch, new_width, new_height)
# 211 ms ± 9.36 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
%timeit resize_batch_np(image_batch, new_width, new_height)
# 106 ms ± 1.8 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
%timeit resize_batch_nb(image_batch, new_width, new_height)
# 48.3 ms ± 142 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)