Tensorflow-Probability - mcmc.sample_chain: ValueError - Code from "Probabilistic Programming and Bayesian Methods for Hackers" - PullRequest
0 votes
06 May 2020

I get this error when running Chapter 3 of "Probabilistic Programming and Bayesian Methods for Hackers" - I adapted the code to work with TensorFlow 2:

ValueError: Failed to convert a NumPy array to a Tensor (Unsupported numpy type: NPY_INT).

The code is as follows:

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

def joint_log_prob(data_, sample_prob_1, sample_centers, sample_sds):
    """
    Joint log probability function for the two-component mixture model.

    Args:
      data_: tensor representation of the original data
      sample_prob_1: scalar representing the probability (out of 1.0) of an
        assignment being 0
      sample_centers: 2-element vector containing the centers of both normal
        distributions in the model
      sample_sds: 2-element vector containing the standard deviations of both
        normal distributions in the model
    Returns:
      The joint log probability of the data and the sampled parameters.
    """
    ### Create a mixture of two scalar Gaussians:
    rv_prob = tfd.Uniform(name='rv_prob', low=0., high=1.)
    sample_prob_2 = 1. - sample_prob_1
    rv_assignments = tfd.Categorical(probs=tf.stack([sample_prob_1, sample_prob_2]))

    rv_sds = tfd.Uniform(name="rv_sds", low=[0., 0.], high=[100., 100.])
    rv_centers = tfd.Normal(name="rv_centers", loc=[120., 190.], scale=[10., 10.])

    rv_observations = tfd.MixtureSameFamily(
        mixture_distribution=rv_assignments,
        components_distribution=tfd.Normal(
          loc=sample_centers,       # One for each component.
          scale=sample_sds))        # And same here.
    return (
        rv_prob.log_prob(sample_prob_1)
        + rv_prob.log_prob(sample_prob_2)
        + tf.reduce_sum(rv_observations.log_prob(data_))      # Sum over samples.
        + tf.reduce_sum(rv_centers.log_prob(sample_centers)) # Sum over components.
        + tf.reduce_sum(rv_sds.log_prob(sample_sds))         # Sum over components.
    )
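As a quick sanity check (not part of the original notebook), joint_log_prob can be evaluated once eagerly on made-up inputs to confirm it returns a scalar log probability; dummy_data below is a hypothetical stand-in for the real data_ tensor:

# Hypothetical smoke test: evaluate the joint log-prob once on made-up data.
dummy_data = tf.constant(np.random.normal(150., 30., size=300), dtype=tf.float32)
print(joint_log_prob(
    dummy_data,
    tf.constant(0.5),           # sample_prob_1
    tf.constant([120., 190.]),  # sample_centers
    tf.constant([10., 10.])))   # sample_sds; should print a scalar float32 tensor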
number_of_steps=25000 #@param {type:"slider", min:0, max:50000, step:1000}
burnin=1000 #@param {type:"slider", min:0, max:2000, step:100}
num_leapfrog_steps=3

# Set the chain's start state.
initial_chain_state = [
    tf.constant(0.5, name='init_probs'),
    tf.constant([120., 190.], name='init_centers'),
    tf.constant([10., 10.], name='init_sds')
]

# Since MCMC operates over unconstrained space, we need to transform the
# samples so they live in real-space.
unconstraining_bijectors = [
    tfp.bijectors.Identity(),       # Maps R to R.
    tfp.bijectors.Identity(),       # Maps R to R.
    tfp.bijectors.Identity(),       # Maps R to R.
]



# Define a closure over our joint_log_prob.
unnormalized_posterior_log_prob = lambda *args: joint_log_prob(data_, *args)

hmc = tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
      target_log_prob_fn=unnormalized_posterior_log_prob,
      step_size=step_size,
      state_gradients_are_stopped=True,
      num_leapfrog_steps=num_leapfrog_steps),
    bijector=unconstraining_bijectors)

hmc = tfp.mcmc.SimpleStepSizeAdaptation(
    inner_kernel=hmc, num_adaptation_steps=int(burnin * 0.8))

# The chain will be stepped for num_results + num_burnin_steps, adapting for
# the first num_adaptation_steps.
[
    posterior_prob,
    posterior_centers,
    posterior_sds
], kernel_results = tfp.mcmc.sample_chain(
    num_results=number_of_steps,
    num_burnin_steps=burnin,
    current_state=initial_chain_state,
    kernel=hmc)
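For reference, in TF2 this sampling call is usually wrapped in a tf.function so the whole chain is traced into a graph rather than executed eagerly step by step. A minimal sketch of that pattern, assuming step_size and the other variables are defined as in the snippet above:

# Sketch only: the same kernel setup as above, traced inside a tf.function.
@tf.function(autograph=False)
def run_chain():
    kernel = tfp.mcmc.SimpleStepSizeAdaptation(
        inner_kernel=tfp.mcmc.TransformedTransitionKernel(
            inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
                target_log_prob_fn=unnormalized_posterior_log_prob,
                step_size=step_size,
                state_gradients_are_stopped=True,
                num_leapfrog_steps=num_leapfrog_steps),
            bijector=unconstraining_bijectors),
        num_adaptation_steps=int(burnin * 0.8))
    return tfp.mcmc.sample_chain(
        num_results=number_of_steps,
        num_burnin_steps=burnin,
        current_state=initial_chain_state,
        kernel=kernel)

[posterior_prob, posterior_centers, posterior_sds], kernel_results = run_chain()

Running the cell eagerly as written above fails with the traceback below: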

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-164-4401fbc2666a> in <module>
      9     num_burnin_steps=burnin,
     10     current_state=initial_chain_state,
---> 11     kernel=hmc)

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\sample.py in sample_chain(num_results, current_state, previous_kernel_results, kernel, num_burnin_steps, num_steps_between_results, trace_fn, return_final_kernel_results, parallel_iterations, name)
    357                                             trace_fn(*state_and_results)),
    358         # pylint: enable=g-long-lambda
--> 359         parallel_iterations=parallel_iterations)
    360 
    361     if return_final_kernel_results:

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\internal\util.py in trace_scan(loop_fn, initial_state, elems, trace_fn, parallel_iterations, name)
    393         body=_body,
    394         loop_vars=(0, initial_state, trace_arrays),
--> 395         parallel_iterations=parallel_iterations)
    396 
    397     stacked_trace = tf.nest.map_structure(lambda x: x.stack(), trace_arrays)

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
    572                   func.__module__, arg_name, arg_value, 'in a future version'
    573                   if date is None else ('after %s' % date), instructions)
--> 574       return func(*args, **kwargs)
    575 
    576     doc = _add_deprecated_arg_value_notice_to_docstring(

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\control_flow_ops.py in while_loop_v2(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, maximum_iterations, name)
   2489       name=name,
   2490       maximum_iterations=maximum_iterations,
-> 2491       return_same_structure=True)
   2492 
   2493 

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\control_flow_ops.py in while_loop(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, name, maximum_iterations, return_same_structure)
   2725                                               list(loop_vars))
   2726       while cond(*loop_vars):
-> 2727         loop_vars = body(*loop_vars)
   2728         if try_to_pack and not isinstance(loop_vars, (list, _basetuple)):
   2729           packed = True

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\internal\util.py in _body(i, state, trace_arrays)
    382 
    383     def _body(i, state, trace_arrays):
--> 384       state = loop_fn(state, elems_array.read(i))
    385       trace_arrays = tf.nest.pack_sequence_as(trace_arrays, [
    386           a.write(i, v) for a, v in zip(

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\sample.py in _trace_scan_fn(state_and_results, num_steps)
    341           body_fn=kernel.one_step,
    342           initial_loop_vars=list(state_and_results),
--> 343           parallel_iterations=parallel_iterations)
    344       return next_state, current_kernel_results
    345 

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\internal\util.py in smart_for_loop(loop_num_iter, body_fn, initial_loop_vars, parallel_iterations, name)
    315           body=lambda i, *args: [i + 1] + list(body_fn(*args)),
    316           loop_vars=[np.int32(0)] + initial_loop_vars,
--> 317           parallel_iterations=parallel_iterations
    318       )[1:]
    319     result = initial_loop_vars

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
    572                   func.__module__, arg_name, arg_value, 'in a future version'
    573                   if date is None else ('after %s' % date), instructions)
--> 574       return func(*args, **kwargs)
    575 
    576     doc = _add_deprecated_arg_value_notice_to_docstring(

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\control_flow_ops.py in while_loop_v2(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, maximum_iterations, name)
   2489       name=name,
   2490       maximum_iterations=maximum_iterations,
-> 2491       return_same_structure=True)
   2492 
   2493 

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\control_flow_ops.py in while_loop(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, name, maximum_iterations, return_same_structure)
   2725                                               list(loop_vars))
   2726       while cond(*loop_vars):
-> 2727         loop_vars = body(*loop_vars)
   2728         if try_to_pack and not isinstance(loop_vars, (list, _basetuple)):
   2729           packed = True

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\internal\util.py in <lambda>(i, *args)
    313       return tf.while_loop(
    314           cond=lambda i, *args: i < loop_num_iter,
--> 315           body=lambda i, *args: [i + 1] + list(body_fn(*args)),
    316           loop_vars=[np.int32(0)] + initial_loop_vars,
    317           parallel_iterations=parallel_iterations

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\simple_step_size_adaptation.py in one_step(self, current_state, previous_kernel_results)
    378         reduced_log_accept_prob = reduce_logmeanexp(
    379             log_accept_prob,
--> 380             axis=prefer_static.range(num_reduce_dims))
    381         # reduced_log_accept_prob must broadcast into step_size_part on the
    382         # left, so we do an additional reduction over dimensions where their

~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\math\generic.py in reduce_logmeanexp(input_tensor, axis, keepdims, name)
    109     lse = tf.reduce_logsumexp(input_tensor, axis=axis, keepdims=keepdims)
    110     n = prefer_static.size(input_tensor) // prefer_static.size(lse)
--> 111     log_n = tf.math.log(tf.cast(n, lse.dtype))
    112     return lse - log_n
    113 

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\util\dispatch.py in wrapper(*args, **kwargs)
    178     """Call target, and fall back on dispatchers if there is a TypeError."""
    179     try:
--> 180       return target(*args, **kwargs)
    181     except (TypeError, ValueError):
    182       # Note: convert_to_eager_tensor currently raises a ValueError, not a

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\math_ops.py in cast(x, dtype, name)
    746       # allows some conversions that cast() can't do, e.g. casting numbers to
    747       # strings.
--> 748       x = ops.convert_to_tensor(x, name="x")
    749       if x.dtype.base_dtype != base_type:
    750         x = gen_math_ops.cast(x, base_type, name=name)

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
   1348 
   1349     if ret is None:
-> 1350       ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
   1351 
   1352     if ret is NotImplemented:

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\tensor_conversion_registry.py in _default_conversion_function(***failed resolving arguments***)
     50 def _default_conversion_function(value, dtype, name, as_ref):
     51   del as_ref  # Unused.
---> 52   return constant_op.constant(value, dtype, name=name)
     53 
     54 

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\constant_op.py in constant(value, dtype, shape, name)
    256   """
    257   return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 258                         allow_broadcast=True)
    259 
    260 

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
    264   ctx = context.context()
    265   if ctx.executing_eagerly():
--> 266     t = convert_to_eager_tensor(value, ctx, dtype)
    267     if shape is None:
    268       return t

~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
     94       dtype = dtypes.as_dtype(dtype).as_datatype_enum
     95   ctx.ensure_initialized()
---> 96   return ops.EagerTensor(value, ctx.device_name, dtype)
     97 
     98 

ValueError: Failed to convert a NumPy array to a Tensor (Unsupported numpy type: NPY_INT).