Commit 899551a9 authored by Chris Jewell

Fixed a silly bug in random_walk_mvnorm_fn

parent e26715b6
@@ -39,14 +39,20 @@ def plotting(dates, sim):
 def random_walk_mvnorm_fn(covariance, name=None):
     """Returns callable that adds Multivariate Normal noise to the input"""
     covariance = covariance + tf.eye(covariance.shape[0], dtype=tf.float64) * 1.e-9
     rv = tfp.distributions.MultivariateNormalTriL(loc=tf.zeros(covariance.shape[0], dtype=tf.float64),
-                                                  scale_tril=tf.linalg.cholesky(tf.convert_to_tensor(covariance, dtype=tf.float64)))
+                                                  scale_tril=tf.linalg.cholesky(
+                                                      tf.convert_to_tensor(covariance,
+                                                                           dtype=tf.float64)))
 
     def _fn(state_parts, seed):
         with tf.name_scope(name or 'random_walk_mvnorm_fn'):
             seed_stream = SeedStream(seed, salt='RandomWalkNormalFn')
-            return rv.sample() + state_parts
+            new_state_parts = rv.sample() + state_parts
+            return new_state_parts
 
     return _fn
 
 
 if __name__ == '__main__':
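Aside for readers of this diff: the callable returned by random_walk_mvnorm_fn is consumed later in this commit as the new_state_fn of tfp.mcmc.RandomWalkMetropolis. A minimal usage sketch follows; toy_log_prob is an illustrative stand-in target, not part of this repository:

    import numpy as np
    import tensorflow as tf
    import tensorflow_probability as tfp

    # Stand-in target: a standard bivariate normal, not the epidemic posterior.
    def toy_log_prob(x):
        return -0.5 * tf.reduce_sum(x ** 2, axis=-1)

    # random_walk_mvnorm_fn is the function defined in the hunk above.
    kernel = tfp.mcmc.RandomWalkMetropolis(
        target_log_prob_fn=toy_log_prob,
        new_state_fn=random_walk_mvnorm_fn(np.eye(2, dtype=np.float64) * 0.1))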
@@ -81,12 +87,12 @@ if __name__ == '__main__':
     seeding = seed_areas(N, n_names)  # Seed 40-44 age group, 30 seeds by popn size
     state_init = simulator.create_initial_state(init_matrix=seeding)
 
-    @tf.function
+    #@tf.function
     def logp(par):
         p = param
         p['epsilon'] = par[0]
         p['beta1'] = par[1]
-        p['gamma'] = par[2]
+        #p['gamma'] = par[2]
         epsilon_logp = tfd.Gamma(concentration=tf.constant(1., tf.float64), rate=tf.constant(1., tf.float64)).log_prob(p['epsilon'])
         beta_logp = tfd.Gamma(concentration=tf.constant(1., tf.float64), rate=tf.constant(1., tf.float64)).log_prob(p['beta1'])
         gamma_logp = tfd.Gamma(concentration=tf.constant(100., tf.float64), rate=tf.constant(400., tf.float64)).log_prob(p['gamma'])
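Side note on the hunk above: dropping gamma from the sampled vector is consistent with its prior. A Gamma(concentration=a, rate=b) distribution has mean a/b and standard deviation sqrt(a)/b, so the Gamma(100, 400) prior already pins gamma near 0.25. A quick check:

    # Mean and sd of a Gamma(concentration=a, rate=b): a/b and sqrt(a)/b.
    a, b = 100.0, 400.0
    print("prior mean:", a / b)          # 0.25
    print("prior sd:", a ** 0.5 / b)     # 0.025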
@@ -103,10 +109,10 @@ if __name__ == '__main__':
     unconstraining_bijector = [tfb.Exp()]
-    initial_mcmc_state = np.array([0.001, 0.036, 0.25], dtype=np.float64)
+    initial_mcmc_state = np.array([0.001, 0.036], dtype=np.float64)
     print("Initial log likelihood:", logp(initial_mcmc_state))
 
-    @tf.function
+    #@tf.function
     def sample(n_samples, init_state, scale):
         return tfp.mcmc.sample_chain(
             num_results=n_samples,
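For context on unconstraining_bijector = [tfb.Exp()]: tfp.mcmc.TransformedTransitionKernel lets the inner random-walk kernel move in unconstrained (log) space while the chain's states stay on the positive scale, which is why the adaptation in the next hunk takes tf.math.log(joint_posterior) before computing a covariance. A small sketch of the mapping:

    import tensorflow_probability as tfp
    tfb = tfp.bijectors

    # forward: unconstrained (log) space -> positive parameter scale
    # inverse: positive sample -> the space the walker actually moves in
    print(tfb.Exp().forward(0.0))    # -> 1.0
    print(tfb.Exp().inverse(0.036))  # -> log(0.036) ~= -3.32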
@@ -118,30 +124,28 @@ if __name__ == '__main__':
                     new_state_fn=random_walk_mvnorm_fn(scale)
                 ),
                 bijector=unconstraining_bijector),
-            trace_fn=lambda _, pkr: pkr.inner_results.is_accepted)
+            trace_fn=lambda _, pkr: pkr)
 
     with tf.device("/CPU:0"):
-        cov = np.diag([0.001, 0.03, 1.02]) * 2.38**2 / 3
+        cov = np.diag([0.00001, 0.00001])
         start = time.perf_counter()
-        joint_posterior, results = sample(100, init_state=initial_mcmc_state, scale=cov)
-        for i in range(10):
-            cov = tfp.stats.covariance(tf.math.log(joint_posterior)) * 2.38**2 / joint_posterior.shape[0] + tf.eye(cov.shape[0], dtype=tf.float64)*1.e-7
-            posterior_new, results = sample(100, joint_posterior[-1, :], cov)
+        joint_posterior, results = sample(50, init_state=initial_mcmc_state, scale=cov)
+        for i in range(20):
+            cov = tfp.stats.covariance(tf.math.log(joint_posterior)) * 2.38**2 / joint_posterior.shape[1]
+            print(cov.numpy())
+            posterior_new, results = sample(50, joint_posterior[-1, :], cov)
             joint_posterior = tf.concat([joint_posterior, posterior_new], axis=0)
-        posterior_new, results = sample(2000, init_state=joint_posterior[-1, :], scale=cov)
+        posterior_new, results = sample(1000, init_state=joint_posterior[-1, :], scale=cov)
         joint_posterior = tf.concat([joint_posterior, posterior_new], axis=0)
         end = time.perf_counter()
 
         print(f"Simulation complete in {end-start} seconds")
         print("Acceptance: ", np.mean(results.numpy()))
-        print(tfp.stats.covariance(tf.math.log(joint_posterior[1000:, :])))
+        print(tfp.stats.covariance(tf.math.log(joint_posterior)))
 
         fig, ax = plt.subplots(1, 3)
         ax[0].plot(joint_posterior[:, 0])
         ax[1].plot(joint_posterior[:, 1])
-        ax[2].plot(joint_posterior[:, 2])
+        #ax[2].plot(joint_posterior[:, 2])
         plt.show()
 
         print(f"Posterior mean: {np.mean(joint_posterior, axis=0)}")