Brock and Hommes model
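This notebook calibrates a Brock and Hommes (1998) heterogeneous-beliefs asset pricing model with variational inference, using a maximum mean discrepancy (MMD) loss between simulated and observed price series. As a rough sketch of the model (see Brock and Hommes, 1998, for the full derivation): $H$ trader types forecast the price deviation $x_t$ from the fundamental with linear rules $g_h x_{t-1} + b_h$, the fraction $n_{h,t}$ of traders using rule $h$ follows a discrete-choice (softmax) rule over past performance with intensity of choice $\beta$, and the price deviation evolves as

$$R x_t = \sum_{h=1}^{H} n_{h,t} \left( g_h x_{t-1} + b_h \right) + \epsilon_t, \qquad \epsilon_t \sim \mathcal{N}(0, \sigma^2),$$

where $R$ is the gross risk-free rate.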
In [1]:
from blackbirds.models.brock_hommes import BrockHommes
from blackbirds.infer import VI
from blackbirds.utils import soft_minimum, soft_maximum
from blackbirds.simulate import simulate_and_observe_model
import torch
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import networkx
import normflows as nf
import pygtc
In [2]:
class MMDLoss:
    def __init__(self, model, data):
        self.model = model
        self.y = data[0]
        self.y_matrix = self.y.reshape(1, -1, 1)
        # Kernel bandwidth via the median heuristic on the observed data.
        self.y_sigma = torch.median(torch.pow(torch.cdist(self.y_matrix, self.y_matrix), 2))
        ny = self.y.shape[0]
        # Unbiased estimate of E[k(y, y')]: subtracting the identity drops the diagonal terms.
        self.kyy = (torch.exp(-torch.pow(torch.cdist(self.y_matrix, self.y_matrix), 2) / self.y_sigma) - torch.eye(ny)).sum() / (ny * (ny - 1))

    def __call__(self, params, data):
        x = simulate_and_observe_model(self.model, params, gradient_horizon=0)[0]
        nx = x.shape[0]
        x_matrix = x.reshape(1, -1, 1)
        kxx = torch.exp(-torch.pow(torch.cdist(x_matrix, x_matrix), 2) / self.y_sigma)
        # kxx = torch.nan_to_num(kxx, 0.)
        kxx = (kxx - torch.eye(nx)).sum() / (nx * (nx - 1))
        kxy = torch.exp(-torch.pow(torch.cdist(x_matrix, self.y_matrix), 2) / self.y_sigma)
        # kxy = torch.nan_to_num(kxy, 0.)
        kxy = kxy.mean()
        return kxx + self.kyy - 2 * kxy
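The loss above is a Gaussian-kernel estimate of the squared maximum mean discrepancy between the simulated series $x$ and the observed series $y$, treating each time step as a sample. With kernel $k(a, b) = \exp(-(a - b)^2 / \sigma_y^2)$, where the bandwidth $\sigma_y^2$ is the median squared pairwise distance of the observed data (the median heuristic),

$$\widehat{\mathrm{MMD}}^2(x, y) = \frac{1}{n_x (n_x - 1)} \sum_{i \neq j} k(x_i, x_j) + \frac{1}{n_y (n_y - 1)} \sum_{i \neq j} k(y_i, y_j) - \frac{2}{n_x n_y} \sum_{i, j} k(x_i, y_j).$$

The first two sums are unbiased estimates of the within-sample kernel means, and the $y$-only term depends just on the data, which is why it is precomputed in __init__.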
Following Dyer et al., we fix all of the model's parameters except g2, g3, b2 and b3, which we calibrate:
In [3]:
H = 4
log_r = np.log(1.0)
log_sigma = np.log(0.04)
g1, b1, b4 = 0, 0, 0
g4 = 1.01
log_beta = np.log(120)
n_timesteps = 100

class CustomBrockHommes(BrockHommes):
    def step(self, params, x):
        # Full parameter vector expected by BrockHommes.step:
        # [log_beta, g1, g2, g3, g4, b1, b2, b3, b4, log_sigma, log_r].
        expanded_params = torch.tensor([log_beta, g1, 0, 0, g4, b1, 0, 0, b4, log_sigma, log_r])
        expanded_params[2] = params[0]  # g2
        expanded_params[3] = params[1]  # g3
        expanded_params[6] = params[2]  # b2
        expanded_params[7] = params[3]  # b3
        return super().step(expanded_params, x)
In [4]:
model = CustomBrockHommes(n_timesteps)
In [5]:
g2 = 0.9
g3 = 0.9
b2 = 0.2
b3 = -0.2
true_parameters = torch.tensor([g2, g3, b2, b3])
torch.manual_seed(0)
x = model.run(true_parameters)
data = model.observe(x)
In [6]:
plt.plot(data[0].cpu())
Out[6]:
[<matplotlib.lines.Line2D at 0x15f687b20>]
In [7]:
def make_flow():
    torch.manual_seed(0)
    base = nf.distributions.base.DiagGaussian(len(true_parameters))
    num_layers = 5
    latent_size = len(true_parameters)
    flows = []
    for i in range(num_layers):
        # The MLP maps one half of the dimensions (2) to the affine
        # parameters (scale and shift) of the other half.
        param_map = nf.nets.MLP([2, 50, 50, latent_size], init_zeros=True)
        flows.append(nf.flows.AffineCouplingBlock(param_map))
        flows.append(nf.flows.Permute(latent_size, mode='swap'))
    return nf.NormalizingFlow(base, flows)
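This is a RealNVP-style flow: each affine coupling block keeps half of the four dimensions fixed and applies an element-wise affine transformation to the other half, with scale and shift produced by the MLP from the fixed half; for a split $z = (z_1, z_2)$, roughly

$$z_1' = z_1, \qquad z_2' = z_2 \odot \exp\big(s(z_1)\big) + t(z_1).$$

The Permute layer swaps the halves between blocks so that every dimension gets transformed, and init_zeros=True initialises each block near the identity, so the flow starts out close to the base Gaussian.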
In [8]:
torch.manual_seed(0)
prior = torch.distributions.MultivariateNormal(
    torch.tensor([0.5, 0.5, 0.5, -0.5]), 1.0 * torch.eye(len(true_parameters))
)
estimator = make_flow()
loss = MMDLoss(model, data)
optimizer = torch.optim.AdamW(estimator.parameters(), lr=1e-3)
vi = VI(
    loss=loss,
    posterior_estimator=estimator,
    prior=prior,
    optimizer=optimizer,
    n_samples_per_epoch=10,
    w=0.001,
    log_tensorboard=True,
    gradient_estimation_method="pathwise",
    gradient_clipping_norm=1.0,
    gradient_horizon=0,
)
vi.run(data, n_epochs=1000, max_epochs_without_improvement=50);
35%|██████████████████████████████████████████████████████████████████▋ | 351/1000 [04:24<08:08, 1.33it/s, loss=-.00783, reg.=0.0097, total=0.00187, best loss=-.00936, epochs since improv.=50]
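The progress bar reports the two components of the objective: the expected MMD loss under the current posterior estimator, and a regularisation term, weighted by w=0.001, that penalises deviation of the posterior from the prior; the total is their sum (here −0.00783 + 0.0097 ≈ 0.00187). Training terminated early, after 351 of the 1000 epochs, because 50 epochs passed without the best loss improving.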
In [9]:
# We can load the best model to check the results
estimator.load_state_dict(vi.best_estimator_state_dict)
Out[9]:
<All keys matched successfully>
In [10]:
# Plot the final flow posterior approximation and compare it to the true parameters:
samples = estimator.sample(10000)[0].detach().numpy()
samples_prior = prior.sample((10000,)).numpy()
pygtc.plotGTC([samples, samples_prior], figureSize=10, truths=true_parameters.numpy(), chainLabels=["flow", "prior"]);
# corner(samples, truths=true_parameters.numpy(), smooth=2);  # alternative using the corner package
In [11]:
n_samples = 1
with torch.no_grad():
    flow_samples = estimator.sample(n_samples)[0]
f, ax = plt.subplots(figsize=(20, 3))
for i in range(n_samples):
    prediction = model.observe(model.run(flow_samples[i]))
    ax.plot(prediction[0].cpu(), color="C0", label="predicted")
true = model.observe(model.run(true_parameters))
ax.plot(true[0], color="black", label="true")
ax.legend()
Out[11]:
<matplotlib.legend.Legend at 0x297e5eef0>
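A single posterior draw gives only one trajectory. To visualise posterior predictive uncertainty, we can overlay several draws from the flow; a minimal sketch, reusing the objects defined above:

n_samples = 20
with torch.no_grad():
    flow_samples = estimator.sample(n_samples)[0]
f, ax = plt.subplots(figsize=(20, 3))
for i in range(n_samples):
    prediction = model.observe(model.run(flow_samples[i]))
    # Label only the first line so the legend stays readable.
    ax.plot(prediction[0].cpu(), color="C0", alpha=0.2, label="predicted" if i == 0 else None)
ax.plot(true[0], color="black", label="true")
ax.legend()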