Commit 12c8bd6a authored by Chris Jewell's avatar Chris Jewell
Browse files

Code cleanup and separation.

parent ed619eae
......@@ -39,17 +39,27 @@ def dense_to_block_diagonal(A, n_blocks):
class CovidUKODE: # TODO: add background case importation rate to the UK, e.g. \epsilon term.
def __init__(self, M_tt, M_hh, C, N, start, end, holidays, bg_max_t, t_step):
def __init__(self,
M_tt: np.float64,
M_hh: np.float64,
W: np.float64,
C: np.float64,
N: np.float64,
date_range: list,
holidays: list,
t_step: np.int64):
"""Represents a CovidUK ODE model
:param K_tt: a MxM matrix of age group mixing in term time
:param K_hh: a MxM matrix of age group mixing in holiday time
:param W: Commuting volume
:param date_range: a time range [start, end)
:param holidays: a list of length-2 tuples containing dates of holidays
:param C: a n_ladsxn_lads matrix of inter-LAD commuting
:param N: a vector of population sizes in each LAD
:param n_lads: the number of LADS
"""
dtype = dtype_util.common_dtype([M_tt, M_hh, C, N], dtype_hint=np.float64)
dtype = dtype_util.common_dtype([M_tt, M_hh, W, C, N], dtype_hint=np.float64)
self.n_ages = M_tt.shape[0]
self.n_lads = C.shape[0]
self.M_tt = tf.convert_to_tensor(M_tt, dtype=tf.float64)
......@@ -74,14 +84,14 @@ class CovidUKODE: # TODO: add background case importation rate to the UK, e.g.
self.C = tf.linalg.LinearOperatorFullMatrix(C + tf.transpose(C))
shp = tf.linalg.LinearOperatorFullMatrix(tf.ones_like(M_tt, dtype=dtype))
self.C = tf.linalg.LinearOperatorKronecker([self.C, shp])
self.W = tf.constant(W, dtype=dtype)
self.N = tf.constant(N, dtype=dtype)
N_matrix = tf.reshape(self.N, [self.n_lads, self.n_ages])
N_sum = tf.reduce_sum(N_matrix, axis=1)
N_sum = N_sum[:, None] * tf.ones([1, self.n_ages], dtype=dtype)
self.N_sum = tf.reshape(N_sum, [-1])
self.times = np.arange(start, end, np.timedelta64(t_step, 'D'))
self.times = np.arange(date_range[0], date_range[1], np.timedelta64(t_step, 'D'))
self.m_select = np.int64((self.times >= holidays[0]) &
(self.times < holidays[1]))
......@@ -99,10 +109,11 @@ class CovidUKODE: # TODO: add background case importation rate to the UK, e.g.
# holiday status as their nearest neighbors in the desired range.
t_idx = tf.clip_by_value(tf.cast(t, tf.int64), 0, self.max_t)
m_switch = tf.gather(self.m_select, t_idx)
commute_volume = tf.pow(tf.gather(self.W, t_idx), param['omega'])
infec_rate = param['beta1'] * (
tf.gather(self.M.matvec(I), m_switch) +
param['beta2'] * self.Kbar * self.C.matvec(I / self.N_sum))
param['beta2'] * self.Kbar * commute_volume * self.C.matvec(I / self.N_sum))
infec_rate = S / self.N * infec_rate
dS = -infec_rate
......
"Plot functions for Covid-19 data brick"
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfs = tfp.stats
def plot_prediction(prediction_period, sims, case_reports):
    """Plot predicted infected/removed trajectories with 95% credible bands
    against the observed cumulative case counts.

    :param prediction_period: a length-2 [start, end) pair of datetime64 bounds
    :param sims: prediction brick [n_sims, n_times, n_states, n_metapops]
    :param case_reports: DataFrame with 'DateVal' and 'CumCases' columns
    """
    # Sum over country
    sims = tf.reduce_sum(sims, axis=3)
    quantiles = [2.5, 50, 97.5]
    dates = np.arange(prediction_period[0],
                      prediction_period[1],
                      np.timedelta64(1, 'D'))
    # States 1:3 are the infectious compartments; state 3 is removals.
    total_infected = tfs.percentile(tf.reduce_sum(sims[:, :, 1:3], axis=2), q=quantiles, axis=0)
    removed = tfs.percentile(sims[:, :, 3], q=quantiles, axis=0)
    # BUG FIX: previously this took tfs.percentile of `removed`, which is
    # already a [3, T] array of quantiles.  Scaling the quantiles by the
    # detection fraction (0.1) directly is exact, since percentiles are
    # equivariant under multiplication by a positive constant.
    removed_observed = removed * 0.1
    fig = plt.figure()
    filler = plt.fill_between(dates, total_infected[0, :], total_infected[2, :], color='lightgray', alpha=0.8, label="95% credible interval")
    plt.fill_between(dates, removed[0, :], removed[2, :], color='lightgray', alpha=0.8)
    plt.fill_between(dates, removed_observed[0, :], removed_observed[2, :], color='lightgray', alpha=0.8)
    ti_line = plt.plot(dates, total_infected[1, :], '-', color='red', alpha=0.4, label="Infected")
    rem_line = plt.plot(dates, removed[1, :], '-', color='blue', label="Removed")
    ro_line = plt.plot(dates, removed_observed[1, :], '-', color='orange', label='Predicted detections')
    # BUG FIX: np.linspace cannot take a np.timedelta64 as its `num`
    # argument; plot the observations at their own reported dates so the
    # x and y sequences are guaranteed to have equal length.
    marks = plt.plot(case_reports['DateVal'], case_reports['CumCases'].to_numpy(), '+', label='Observed cases')
    plt.legend([ti_line[0], rem_line[0], ro_line[0], filler, marks[0]],
               ["Infected", "Removed", "Predicted detections", "95% credible interval", "Observed counts"])
    plt.grid(color='lightgray', linestyle='dotted')
    plt.xlabel("Date")
    plt.ylabel("Individuals")
    fig.autofmt_xdate()
    plt.show()
def plot_case_incidence(dates, sims):
    """Plot daily case incidence (per 10,000) with a 95% credible band."""
    # Daily increments of the removed compartment (state 3), summed over
    # the metapopulation axis.
    cumulative = sims[:, :, 3, :].sum(axis=2)
    daily = cumulative[:, 1:] - cumulative[:, :-1]
    daily = tfs.percentile(daily, q=[2.5, 50, 97.5], axis=0)/10000.
    fig = plt.figure()
    # Differencing drops one time point, hence dates[:-1].
    interval_dates = dates[:-1]
    plt.fill_between(interval_dates, daily[0, :], daily[2, :], color='lightgray', label="95% credible interval")
    plt.plot(interval_dates, daily[1, :], '-', alpha=0.2, label='New cases')
    plt.grid(color='lightgray', linestyle='dotted')
    plt.xlabel("Date")
    plt.ylabel("Incidence per 10,000")
    fig.autofmt_xdate()
    plt.show()
\ No newline at end of file
......@@ -4,6 +4,18 @@ import numpy as np
import pandas as pd
import geopandas as gp
def load_commute_volume(filename, date_range):
    """Loads commute data and clips or extends the date range.

    :param filename: CSV file with a 'date' column and one value column
    :param date_range: a length-2 [start, end) pair of datetime64 bounds
    :returns: a daily-indexed DataFrame covering [start, end), padded at
        both edges with the nearest observed value
    """
    # BUG FIX: parse_dates is required — without it the CSV index stays a
    # string index that can never match or compare against the datetime64
    # index constructed below.
    commute_raw = pd.read_csv(filename, index_col='date', parse_dates=['date'])
    commute_raw.sort_index(axis=0, inplace=True)
    commute = pd.DataFrame(index=np.arange(date_range[0], date_range[1], np.timedelta64(1, 'D')))
    commute = commute.merge(commute_raw, left_index=True, right_index=True, how='left')
    # Extend the observed series flat at both ends of the requested range.
    commute[commute.index < commute_raw.index[0]] = commute_raw.iloc[0, 0]
    commute[commute.index > commute_raw.index[-1]] = commute_raw.iloc[-1, 0]
    return commute
def group_ages(df):
"""
Sums age groups
......
"Covid analysis utility functions"
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_probability as tfp
import h5py
tfs = tfp.stats
def sanitise_parameter(par_dict):
    """Sanitises a dictionary of parameters.

    :param par_dict: mapping containing at least the keys
        'omega', 'beta1', 'beta2', 'nu', 'gamma'
    :returns: a new dict with exactly those keys, values cast to np.float64
    """
    # Diff residue removed: a stale duplicate assignment listing 'epsilon'
    # was immediately overwritten by this list and has been deleted.
    par = ['omega', 'beta1', 'beta2', 'nu', 'gamma']
    d = {key: np.float64(par_dict[key]) for key in par}
    return d
def sanitise_settings(par_dict):
    """Sanitises a dictionary of settings.

    :param par_dict: mapping with 'inference_period', 'prediction_period'
        (each a [start, end] pair of date strings), 'time_step', and
        'holiday' (a list of date strings)
    :returns: a dict with datetime64 arrays for the periods and holidays,
        and a float time step
    """
    # Diff residue removed: the half-deleted old dict literal ('start'/'end'/
    # 'bg_max_time') left the text syntactically invalid; only the new
    # schema is kept.
    d = {'inference_period': np.array(par_dict['inference_period'], dtype=np.datetime64),
         'prediction_period': np.array(par_dict['prediction_period'], dtype=np.datetime64),
         'time_step': float(par_dict['time_step']),
         'holiday': np.array([np.datetime64(date) for date in par_dict['holiday']])}
    return d
......@@ -59,3 +63,85 @@ def final_size(sim):
fs = remove[-1, :, :].sum(axis=0)
return fs
def brick_to_imperial_csv(creation_date, date, sim, required_dates=None):
    """Converts a simulation brick to an Imperial-style csv

    :param creation_date: date timestamp
    :param date: the timeseries date
    :param sim: the 4D array containing the prediction [M, T, S, L] where M is number of simulations,
        T is number of time points, S is number of states, L is number of age/LA combinations.
    :param required_dates: the dates between which the forecast is required. Semi-closed interval [low, high)
    :returns: a Pandas DataFrame object
    """
    sim = sim.sum(axis=3)  # Todo: sum age/la for now
    date = np.array(date, dtype=np.datetime64)
    if required_dates is not None:
        required_dates = np.array(required_dates, dtype=np.datetime64)
    else:
        # BUG FIX: the filter below is right-open, so a default upper bound
        # of date.max() silently dropped the final time point.  Push the
        # bound one day past the last date so the default keeps everything.
        required_dates = [date.min(), date.max() + np.timedelta64(1, 'D')]

    # Removals (state index 3) interpreted as cumulative cases.
    cases = sim[:, :, 3]
    cases_mean = cases.mean(axis=0)
    cases_q = np.percentile(a=cases, q=[2.5, 97.5], axis=0)
    rv = pd.DataFrame({'Group': 'Lancaster',
                       'Scenario': 'Forecast',
                       'CreationDate': creation_date,
                       'DateOfValue': date,
                       'Geography': 'England',
                       'ValueType': 'CumCases',
                       'Value': cases_mean,
                       'LowerBound': cases_q[0],
                       'UpperBound': cases_q[1]
                       })
    # Clip to the requested semi-closed interval [low, high).
    rv = rv[np.logical_and(rv['DateOfValue'] >= required_dates[0],
                           rv['DateOfValue'] < required_dates[1])]
    return rv
def save_sims(dates, sims, la_names, age_groups, filename):
    """Write a prediction brick and its dimension labels to an HDF5 file.

    :param dates: timeseries dates, one per time point
    :param sims: prediction brick [sim_id, t, state, la_age]
    :param la_names: LAD names (one per LAD)
    :param age_groups: age-group labels (one per group)
    :param filename: output HDF5 path
    """
    # BUG FIX: use a context manager so the file handle is released even if
    # one of the dataset writes raises (the original leaked it on error).
    with h5py.File(filename, 'w') as f:
        f.create_dataset('prediction', data=sims, compression='gzip', compression_opts=4)
        # Expand labels to the flattened la_age axis: LADs repeated per age
        # group, age groups tiled per LAD.
        la_long = np.repeat(la_names, age_groups.shape[0]).astype(np.string_)
        age_long = np.tile(age_groups, la_names.shape[0]).astype(np.string_)
        f.create_dataset("dimnames", data=[b'sim_id', b't', b'state', b'la_age'])
        f.create_dataset('la_names', data=la_long)
        f.create_dataset('age_names', data=age_long)
        f.create_dataset('date', data=dates.astype(np.string_))
def extract_locs(in_file: str, out_file: str, loc: list):
    """Extract the predictions for a subset of LADs into a new HDF5 file.

    :param in_file: path to an HDF5 file written by save_sims
    :param out_file: path for the extracted HDF5 file
    :param loc: list of LAD name strings to keep
    :returns: the extracted prediction array [sim_id, t, state, selected la_age]
    """
    f = h5py.File(in_file, 'r')
    # 'la_names' is the expanded per-(LAD, age) label vector, so la_loc is a
    # boolean mask over the flattened la_age axis.
    la_names = f['la_names'][:].astype(str)
    la_loc = np.isin(la_names, loc)
    extract = f['prediction'][:, :, :, la_loc]
    # NOTE(review): save_sims repeats/tiles its la_names/age_groups args, but
    # the masked arrays passed here are already in expanded long form —
    # verify the labels written to out_file are not re-expanded incorrectly.
    # NOTE(review): f is not closed if an intermediate read raises; consider
    # a context manager.
    save_sims(f['date'][:], extract, f['la_names'][la_loc],
              f['age_names'][la_loc], out_file)
    f.close()
    return extract
def extract_liverpool(in_file: str, out_file: str):
    """Extract the Liverpool city region LADs from a prediction file.

    :param in_file: path to an HDF5 prediction file
    :param out_file: path for the extracted HDF5 file
    """
    # ONS codes: Halton, Warrington, Blackburn w. Darwen, Blackpool, then
    # Knowsley, Liverpool, St Helens, Sefton, Wirral.
    liverpool_lads = [
        "E06000006", "E06000007", "E06000008", "E06000009",
        "E08000011", "E08000012", "E08000013", "E08000014", "E08000015",
    ]
    extract_locs(in_file, out_file, liverpool_lads)
......@@ -4,46 +4,12 @@ import sys
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
import yaml
from covid.model import CovidUKODE
from covid.rdata import *
def sanitise_parameter(par_dict):
    """Coerce the expected epidemic parameters to np.float64.

    :param par_dict: mapping containing at least the keys listed below
    :returns: a dict restricted to those keys, values cast to np.float64
    """
    expected = ['epsilon', 'beta1', 'beta2', 'nu', 'gamma']
    return {name: np.float64(par_dict[name]) for name in expected}
def sanitise_settings(par_dict):
    """Coerce raw settings into numpy datetime64 / float types.

    :param par_dict: mapping with 'start', 'end', 'time_step', 'holiday'
        (list of date strings) and 'bg_max_time'
    :returns: a dict with typed equivalents of those settings
    """
    holidays = np.array([np.datetime64(day) for day in par_dict['holiday']])
    return {
        'start': np.datetime64(par_dict['start']),
        'end': np.datetime64(par_dict['end']),
        'time_step': float(par_dict['time_step']),
        'holiday': holidays,
        'bg_max_time': np.datetime64(par_dict['bg_max_time']),
    }
def seed_areas(N, names, age_group=8, num_la=152, num_age=17, n_seed=30.):
    """Distribute initial infection seeds across high-population urban areas.

    :param N: population vector of length num_la * num_age
    :param names: DataFrame with an 'Area.name.2' column of length
        num_la * num_age giving the area name of each (LA, age) cell
    :param age_group: age-band index to seed (default 8, i.e. 40-44)
    :param num_la: number of local authorities
    :param num_age: number of age bands
    :param n_seed: total number of seeds to distribute (proportional to
        each seeded area's population in the chosen age band)
    :returns: a [num_la, num_age] array of seed counts
    """
    areas = ['Inner London',
             'Outer London',
             'West Midlands (Met County)',
             'Greater Manchester (Met County)']
    names_matrix = names['Area.name.2'].to_numpy().reshape([num_la, num_age])
    # np.isin replaces the deprecated np.in1d (consistent with its use in
    # extract_locs elsewhere in this codebase).
    seed_areas = np.isin(names_matrix[:, age_group], areas)
    N_matrix = N.reshape([num_la, num_age])  # LA x Age
    pop_size_sub = N_matrix[seed_areas, age_group]  # Gather
    # Allocate seeds proportionally to population, rounded to whole people.
    n = np.round(n_seed * pop_size_sub / pop_size_sub.sum())
    seeding = np.zeros_like(N_matrix)
    seeding[seed_areas, age_group] = n  # Scatter
    return seeding
from covid.pydata import load_commute_volume
from covid.util import sanitise_parameter, sanitise_settings, seed_areas
def sum_age_groups(sim):
......
......@@ -11,6 +11,7 @@ import matplotlib.pyplot as plt
from tensorflow_probability.python.util import SeedStream
from covid.rdata import load_population, load_age_mixing, load_mobility_matrix
from covid.pydata import load_commute_volume
from covid.model import CovidUKODE, covid19uk_logp
from covid.util import *
......@@ -64,9 +65,14 @@ if __name__ == '__main__':
with open(options.config, 'r') as ymlfile:
config = yaml.load(ymlfile)
param = sanitise_parameter(config['parameter'])
settings = sanitise_settings(config['settings'])
K_tt, age_groups = load_age_mixing(config['data']['age_mixing_matrix_term'])
K_hh, _ = load_age_mixing(config['data']['age_mixing_matrix_hol'])
W = load_commute_volume(config['data']['commute_volume'], settings['inference_period'])
T, la_names = load_mobility_matrix(config['data']['mobility_matrix'])
np.fill_diagonal(T, 0.)
......@@ -76,9 +82,7 @@ if __name__ == '__main__':
K_hh = K_hh.astype(DTYPE)
T = T.astype(DTYPE)
N = N.astype(DTYPE)
param = sanitise_parameter(config['parameter'])
settings = sanitise_settings(config['settings'])
W = W.to_numpy().astype(DTYPE)
case_reports = pd.read_csv(config['data']['reported_cases'])
case_reports['DateVal'] = pd.to_datetime(case_reports['DateVal'])
......@@ -92,10 +96,10 @@ if __name__ == '__main__':
M_hh=K_hh,
C=T,
N=N,
start=date_range[0]-np.timedelta64(1,'D'),
end=date_range[1],
W=W,
date_range=[date_range[0]-np.timedelta64(1,'D'),
date_range[1]],
holidays=settings['holiday'],
bg_max_t=settings['bg_max_time'],
t_step=int(settings['time_step']))
seeding = seed_areas(N, n_names) # Seed 40-44 age group, 30 seeds by popn size
......@@ -120,7 +124,7 @@ if __name__ == '__main__':
unconstraining_bijector = [tfb.Exp()]
initial_mcmc_state = np.array([0.05, 0.25], dtype=np.float64)
initial_mcmc_state = np.array([0.06, 0.3], dtype=np.float64)
print("Initial log likelihood:", logp(initial_mcmc_state))
@tf.function(autograph=False, experimental_compile=True)
......@@ -145,32 +149,32 @@ if __name__ == '__main__':
num_covariance_estimation_iterations = 50
num_covariance_estimation_samples = 50
num_final_samples = 10000
with tf.device("/CPU:0"):
start = time.perf_counter()
for i in range(num_covariance_estimation_iterations):
step_start = time.perf_counter()
samples, results = sample(num_covariance_estimation_samples,
initial_mcmc_state,
scale)
step_end = time.perf_counter()
print(f'{i} time {step_end - step_start}')
print("Acceptance: ", results.numpy().mean())
joint_posterior = tf.concat([joint_posterior, samples], axis=0)
cov = tfp.stats.covariance(tf.math.log(joint_posterior))
print(cov.numpy())
scale = cov * 2.38**2 / joint_posterior.shape[1]
initial_mcmc_state = joint_posterior[-1, :]
start = time.perf_counter()
for i in range(num_covariance_estimation_iterations):
step_start = time.perf_counter()
samples, results = sample(num_final_samples,
init_state=joint_posterior[-1, :], scale=scale,)
joint_posterior = tf.concat([joint_posterior, samples], axis=0)
samples, results = sample(num_covariance_estimation_samples,
initial_mcmc_state,
scale)
step_end = time.perf_counter()
print(f'Sampling step time {step_end - step_start}')
end = time.perf_counter()
print(f"Simulation complete in {end-start} seconds")
print("Acceptance: ", np.mean(results.numpy()))
print(tfp.stats.covariance(tf.math.log(joint_posterior)))
print(f'{i} time {step_end - step_start}')
print("Acceptance: ", results.numpy().mean())
joint_posterior = tf.concat([joint_posterior, samples], axis=0)
cov = tfp.stats.covariance(tf.math.log(joint_posterior))
print(cov.numpy())
scale = cov * 2.38**2 / joint_posterior.shape[1]
initial_mcmc_state = joint_posterior[-1, :]
step_start = time.perf_counter()
samples, results = sample(num_final_samples,
init_state=joint_posterior[-1, :], scale=scale,)
joint_posterior = tf.concat([joint_posterior, samples], axis=0)
step_end = time.perf_counter()
print(f'Sampling step time {step_end - step_start}')
end = time.perf_counter()
print(f"Simulation complete in {end-start} seconds")
print("Acceptance: ", np.mean(results.numpy()))
print(tfp.stats.covariance(tf.math.log(joint_posterior)))
fig, ax = plt.subplots(1, 3)
ax[0].plot(joint_posterior[:, 0])
......@@ -178,5 +182,5 @@ if __name__ == '__main__':
plt.show()
print(f"Posterior mean: {np.mean(joint_posterior, axis=0)}")
with open('pi_beta_2020-03-15.pkl', 'wb') as f:
with open('pi_beta_2020-03-23.pkl', 'wb') as f:
pkl.dump(joint_posterior, f)
......@@ -5,23 +5,27 @@ data:
age_mixing_matrix_hol: data/polymod_no_school_df.rds
mobility_matrix: data/movement.rds
population_size: data/pop.rds
commute_volume: data/commute_vol_2020-03-20.csv
reported_cases: data/DailyConfirmedCases_2020-03-20.csv
parameter:
epsilon: 1.0e-6
beta1: 0.04 # R0 2.4
beta2: 0.33 # Contact with commuters 1/3rd of the time
omega: 1.0
nu: 0.25
gamma: 0.25
settings:
start: 2020-02-19
end: 2020-04-01
inference_period:
- 2020-02-19
- 2020-04-01
holiday:
- 2020-03-23
- 2020-10-01
bg_max_time: 2020-03-01
time_step: 1.
prediction_period:
- 2020-02-19
- 2020-06-01
output:
simulation: covid_ode.hd5
......@@ -13,19 +13,12 @@ import h5py
from covid.model import CovidUKODE
from covid.rdata import load_age_mixing, load_mobility_matrix, load_population
from covid.util import sanitise_settings, sanitise_parameter, seed_areas, doubling_time
from covid.pydata import load_commute_volume
from covid.util import sanitise_settings, sanitise_parameter, seed_areas, doubling_time, save_sims
from covid.plotting import plot_prediction, plot_case_incidence
DTYPE = np.float64
def save_sims(sims, la_names, age_groups, filename):
    """Write a prediction brick and its dimension labels to an HDF5 file.

    :param sims: prediction brick [sim_id, t, state, la_age]
    :param la_names: LAD names (one per LAD)
    :param age_groups: age-group labels (one per group)
    :param filename: output HDF5 path
    """
    f = h5py.File(filename, 'w')
    dset_sim = f.create_dataset('prediction', data=sims)
    # Expand labels to the flattened la_age axis: LADs repeated per age
    # group, age groups tiled per LAD.
    la_long = np.repeat(la_names, age_groups.shape[0]).astype(np.string_)
    age_long = np.tile(age_groups, la_names.shape[0]).astype(np.string_)
    dset_dims = f.create_dataset("dimnames", data=[b'sim_id', b't', b'state', b'la_age'])
    dset_la = f.create_dataset('la_names', data=la_long)
    dset_age = f.create_dataset('age_names', data=age_long)
    # NOTE(review): f is not closed if a write above raises; a context
    # manager would be safer.
    f.close()
if __name__ == '__main__':
......@@ -37,6 +30,11 @@ if __name__ == '__main__':
with open(options.config, 'r') as ymlfile:
config = yaml.load(ymlfile)
param = sanitise_parameter(config['parameter'])
settings = sanitise_settings(config['settings'])
W = load_commute_volume(config['data']['commute_volume'], settings['prediction_period'])
K_tt, age_groups = load_age_mixing(config['data']['age_mixing_matrix_term'])
K_hh, _ = load_age_mixing(config['data']['age_mixing_matrix_hol'])
......@@ -49,14 +47,11 @@ if __name__ == '__main__':
K_hh = K_hh.astype(DTYPE)
T = T.astype(DTYPE)
N = N.astype(DTYPE)
param = sanitise_parameter(config['parameter'])
param['epsilon'] = 0.0
settings = sanitise_settings(config['settings'])
W = W.to_numpy().astype(DTYPE)
case_reports = pd.read_csv(config['data']['reported_cases'])
case_reports['DateVal'] = pd.to_datetime(case_reports['DateVal'])
case_reports = case_reports[case_reports['DateVal'] >= '2020-02-19']
case_reports = case_reports[case_reports['DateVal'] >= settings['inference_period'][0]]
date_range = [case_reports['DateVal'].min(), case_reports['DateVal'].max()]
y = case_reports['CumCases'].to_numpy()
y_incr = y[1:] - y[-1:]
......@@ -68,8 +63,15 @@ if __name__ == '__main__':
data_dates = np.arange(date_range[0],
date_range[1]+np.timedelta64(1,'D'),
np.timedelta64(1, 'D'))
simulator = CovidUKODE(K_tt, K_hh, T, N, date_range[0] - np.timedelta64(1, 'D'),
np.datetime64('2020-09-01'), settings['holiday'], settings['bg_max_time'], 1)
simulator = CovidUKODE(M_tt=K_tt,
M_hh=K_hh,
C=T,
W=W,
N=N,
date_range=[settings['prediction_period'][0],
settings['prediction_period'][1]],
holidays=settings['holiday'],
t_step=1)
seeding = seed_areas(N, n_names) # Seed 40-44 age group, 30 seeds by popn size
state_init = simulator.create_initial_state(init_matrix=seeding)
......@@ -87,51 +89,17 @@ if __name__ == '__main__':
sims = sims.write(i, sim)
return sims.gather(range(beta.shape[0])), R0.gather(range(beta.shape[0]))
draws = pi_beta.numpy()[np.arange(5000, pi_beta.shape[0], 10), :]
draws = pi_beta.numpy()[np.arange(5000, pi_beta.shape[0], 30), :]
with tf.device('/CPU:0'):
sims, R0 = prediction(draws[:, 0], draws[:, 1])
sims = tf.stack(sims) # shape=[n_sims, n_times, n_states, n_metapops]
save_sims(sims, la_names, age_groups, 'pred_2020-03-15.h5')
sims = tf.stack(sims) # shape=[n_sims, n_times, n_states, n_metapops]
save_sims(simulator.times, sims, la_names, age_groups, 'pred_2020-03-23.h5')
dub_time = [doubling_time(simulator.times, sim, '2020-03-01', '2020-04-01') for sim in sims.numpy()]
# Sum over country
sims = tf.reduce_sum(sims, axis=3)
print("Plotting...", flush=True)
dates = np.arange(date_range[0]-np.timedelta64(1, 'D'), np.datetime64('2020-09-01'),
np.timedelta64(1, 'D'))
total_infected = tfs.percentile(tf.reduce_sum(sims[:, :, 1:3], axis=2), q=[2.5, 50, 97.5], axis=0)
removed = tfs.percentile(sims[:, :, 3], q=[2.5, 50, 97.5], axis=0)
removed_observed = tfs.percentile(removed * 0.1, q=[2.5, 50, 97.5], axis=0)
plot_prediction(settings['prediction_period'], sims, case_reports)
plot_case_incidence(settings['prediction_period'], sims)
fig = plt.figure()
filler = plt.fill_between(dates, total_infected[0, :], total_infected[2, :], color='lightgray', alpha=0.8, label="95% credible interval")
plt.fill_between(dates, removed[0, :], removed[2, :], color='lightgray', alpha=0.8)
plt.fill_between(dates, removed_observed[0, :], removed_observed[2, :], color='lightgray', alpha=0.8)
ti_line = plt.plot(dates, total_infected[1, :], '-', color='red', alpha=0.4, label="Infected")
rem_line = plt.plot(dates, removed[1, :], '-', color='blue', label="Removed")
ro_line = plt.plot(dates, removed_observed[1, :], '-', color='orange', label='Predicted detections')
marks = plt.plot(data_dates, y, '+', label='Observed cases')
plt.legend([ti_line[0], rem_line[0], ro_line[0], filler, marks[0]],
["Infected", "Removed", "Predicted detections", "95% credible interval", "Observed counts"])
plt.grid(color='lightgray', linestyle='dotted')
plt.xlabel("Date")
plt.ylabel("Individuals")
fig.autofmt_xdate()
plt.show()
# Number of new cases per day
new_cases = tfs.percentile(removed[:, 1:] - removed[:, :-1], q=[2.5, 50, 97.5], axis=0)/10000.
fig = plt.figure()
plt.fill_between(dates[:-1], new_cases[0, :], new_cases[2, :], color='lightgray', label="95% credible interval")
plt.plot(dates[:-1], new_cases[1, :], '-', alpha=0.2, label='New cases')
plt.grid(color='lightgray', linestyle='dotted')
plt.xlabel("Date")
plt.ylabel("Incidence per 10,000")
fig.autofmt_xdate()
plt.show()
# R0
R0_ci = tfs.percentile(R0, q=[2.5, 50, 97.5])
......@@ -141,6 +109,7 @@ if __name__ == '__main__':
plt.title("R0")
plt.show()
# Doubling time
dub_ci = tfs.percentile(dub_time, q=[2.5, 50, 97.5])
print("Doubling time:", dub_ci)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment