Вот рекурсивное решение. Оно не использует NumPy и не очень эффективно, хотя должно быть быстрее опубликованного фрагмента:
import math
from itertools import permutations
def probability_grid(values, n):
    """Yield every length-*n* tuple over *values* whose entries sum to 1.

    Repetitions of a value are allowed; all distinct orderings of each
    valid multiset are produced via ``permutations``.

    values -- iterable of candidate probabilities (may include 0)
    n      -- length of each yielded tuple
    """
    values = set(values)
    # If 0 is among the values, shorter solutions can be padded with zeros.
    with_zero = 0. in values
    values.discard(0.)
    if not values:
        # PEP 479: raising StopIteration inside a generator becomes a
        # RuntimeError on Python 3.7+; a plain return ends the generator.
        return
    values = list(values)
    for p in _probability_grid_rec(values, n, [], 0.):
        if with_zero:
            # Pad the partial solution with zeros up to the requested length.
            p += (0.,) * (n - len(p))
        if len(p) == n:
            # set() drops duplicate orderings when p has repeated values
            # (faster: more_itertools.distinct_permutations(p)).
            yield from set(permutations(p))
def _probability_grid_rec(values, n, current, current_sum, eps=1e-10):
if not values or n <= 0:
if abs(current_sum - 1.) <= eps:
yield tuple(current)
else:
value, *values = values
inv = 1. / value
# Skip this value
yield from _probability_grid_rec(
values, n, current, current_sum, eps)
# Add copies of this value
precision = round(-math.log10(eps))
adds = int(round((1. - current_sum) / value, precision))
for i in range(adds):
current.append(value)
current_sum += value
n -= 1
yield from _probability_grid_rec(
values, n, current, current_sum, eps)
# Remove copies of this value
if adds > 0:
del current[-adds:]
# Demo: enumerate all length-3 distributions over {0, 0.5, 1}.
print(list(probability_grid([0, 0.5, 1.], 3)))
Вывод:
[(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0), (0.5, 0.5, 0.0), (0.0, 0.5, 0.5), (0.5, 0.0, 0.5)]
Быстрое сравнение с опубликованным методом:
from itertools import product
def probability_grid_basic(values, n):
    """Brute-force baseline from the question: enumerate every n-tuple
    over *values* and keep those whose components sum to exactly 1.

    Note the exact float equality test — kept deliberately, as this is
    the posted method being benchmarked against.
    """
    return [combo for combo in product(values, repeat=n) if sum(combo) == 1]
values = [0, 0.25, 1./3., .5, 1]
n = 6
%timeit list(probability_grid(values, n))
1.61 ms ± 20.6 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
%timeit probability_grid_basic(values, n)
6.27 ms ± 186 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)