Closed: paegodu closed this issue 5 years ago.
As mentioned in other comments, you can do make_simple_step_size_update_policy(num_adaptation_steps=None).
It works, thanks.
If anyone else tries this, beware that the sampling takes a few minutes to finish.
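For anyone skimming: the only change is in how the step-size update policy is passed to the HMC kernel. A minimal sketch of just that part, assuming unnormalized_log_posterior and step_size are defined as in the full listing below:

    hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=unnormalized_log_posterior,
        num_leapfrog_steps=2,
        step_size=step_size,
        # num_adaptation_steps=None is the workaround discussed in this thread.
        step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(
            num_adaptation_steps=None),
        state_gradients_are_stopped=True)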
Via other comments, can do
make_simple_step_size_update_policy(num_adaptation_steps=None)
I tried this on Colab, but the cell below runs endlessly.
It has run for more than 2 hours and still shows no result.
import tensorflow as tf               # TF1-style graph mode
import tensorflow_probability as tfp

# count_data (and n_count_data, its length) are defined earlier in the notebook.

# Set the chain's start state.
initial_chain_state = [
    tf.to_float(tf.reduce_mean(count_data)) * tf.ones([], dtype=tf.float32, name="init_lambda1"),
    tf.to_float(tf.reduce_mean(count_data)) * tf.ones([], dtype=tf.float32, name="init_lambda2"),
    0.5 * tf.ones([], dtype=tf.float32, name="init_tau"),
]

# Since HMC operates over unconstrained space, we need bijectors whose
# forward maps send real-valued samples back onto each parameter's support.
unconstraining_bijectors = [
    tfp.bijectors.Exp(),      # lambda_1: maps R onto the positive reals.
    tfp.bijectors.Exp(),      # lambda_2: maps R onto the positive reals.
    tfp.bijectors.Sigmoid(),  # tau: maps R onto [0, 1].
]

def joint_log_prob(count_data, lambda_1, lambda_2, tau):
    tfd = tfp.distributions

    # Exponential priors on both Poisson rates; Uniform(0, 1) prior on the
    # (normalized) switchpoint tau.
    alpha = 1. / tf.reduce_mean(count_data)
    rv_lambda_1 = tfd.Exponential(rate=alpha)
    rv_lambda_2 = tfd.Exponential(rate=alpha)
    rv_tau = tfd.Uniform()

    # Use lambda_1 before the switchpoint and lambda_2 after it.
    lambda_ = tf.gather(
        [lambda_1, lambda_2],
        indices=tf.to_int32(
            tau * tf.to_float(tf.size(count_data))
            <= tf.to_float(tf.range(tf.size(count_data)))))
    rv_observation = tfd.Poisson(rate=lambda_)

    return (
        rv_lambda_1.log_prob(lambda_1)
        + rv_lambda_2.log_prob(lambda_2)
        + rv_tau.log_prob(tau)
        + tf.reduce_sum(rv_observation.log_prob(count_data))
    )

# Define a closure over our joint_log_prob.
def unnormalized_log_posterior(lambda1, lambda2, tau):
    return joint_log_prob(count_data, lambda1, lambda2, tau)

# Initialize the step_size. (It will be automatically adapted.)
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
    step_size = tf.get_variable(
        name='step_size',
        initializer=tf.constant(0.05, dtype=tf.float32),
        trainable=False,
        use_resource=True)

# Sample from the chain.
[
    lambda_1_samples,
    lambda_2_samples,
    posterior_tau,
], kernel_results = tfp.mcmc.sample_chain(
    num_results=100000,
    num_burnin_steps=10000,
    current_state=initial_chain_state,
    kernel=tfp.mcmc.TransformedTransitionKernel(
        inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=unnormalized_log_posterior,
            num_leapfrog_steps=2,
            step_size=step_size,
            step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(
                num_adaptation_steps=None),
            state_gradients_are_stopped=True),
        bijector=unconstraining_bijectors))

# Map the sampled switchpoint from [0, 1] back to a day index.
tau_samples = tf.floor(posterior_tau * tf.to_float(tf.size(count_data)))

# tau_samples, lambda_1_samples, lambda_2_samples contain
# N samples from the corresponding posterior distribution.
N = tf.shape(tau_samples)[0]
expected_texts_per_day = tf.zeros(n_count_data)

# Initialize any created variables.
init_g = tf.global_variables_initializer()
init_l = tf.local_variables_initializer()
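Note that in this TF1-style graph code nothing executes until the tensors are evaluated in a session, so the long runtime reported above is presumably spent in that evaluation step. A minimal sketch of that step (the fetched array names are just illustrative):

    with tf.Session() as sess:
        sess.run([init_g, init_l])
        # Evaluating the sample tensors triggers the (slow) MCMC run.
        lambda_1_vals, lambda_2_vals, tau_vals = sess.run(
            [lambda_1_samples, lambda_2_samples, tau_samples])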
In both Google Colab and Jupyter, an argument is missing under the section "Specify the posterior sampler".
When calling
tfp.mcmc.make_simple_step_size_update_policy()
the following error is raised: