InferPy vs Edward¶
This section shows the equivalent Edward code for the models in the Probabilistic Model Zoo section.
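The snippets below are fragments: they assume that InferPy, Edward, TensorFlow and NumPy are already imported and that the training data and model dimensions are defined beforehand. The following setup is only an illustrative assumption (the names match those used in the snippets; the concrete values are arbitrary):

import numpy as np
import tensorflow as tf
import edward as ed
import inferpy as inf

N = 1000   # number of observations
d = 10     # dimensionality of each observation
K = 3      # number of mixture components / latent factors / classes
T = 5000   # number of MCMC samples stored by the Empirical approximations

# toy data with the shapes the snippets expect; the shape of y_train
# depends on the model (a column vector here, as in the regression examples)
x_train = np.random.randn(N, d).astype(np.float32)
y_train = np.random.randn(N, 1).astype(np.float32)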
Bayesian Linear Regression¶
# model definition
with inf.ProbModel() as model:
    # define the weights
    w0 = inf.models.Normal(0, 1)
    w = inf.models.Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = inf.models.Normal(0, 1, observed=True, dim=d)
        y = inf.models.Normal(w0 + inf.dot(x, w), 1.0, observed=True)

# compile and fit the model with training data
model.compile()
data = {x: x_train, y: y_train}
model.fit(data)

# print the posterior distributions
print(model.posterior(w))
print(model.posterior(w0))
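An Edward counterpart for this model, written in the style of the later sections of this page, could look roughly as follows; the unit-scale Normal likelihood and the use of KLqp are assumptions made for illustration:

# Edward equivalent (sketch)
# define the weights
w0 = ed.models.Normal(loc=tf.zeros(1), scale=tf.ones(1))
w = ed.models.Normal(loc=tf.zeros(d), scale=tf.ones(d))

# define the generative model
x = ed.models.Normal(loc=tf.zeros([N, d]), scale=tf.ones([N, d]))
y = ed.models.Normal(loc=ed.dot(x, w) + w0, scale=tf.ones(N))

# fit the model with training data using variational inference
qw = ed.models.Normal(loc=tf.Variable(tf.random_normal([d])),
                      scale=tf.nn.softplus(tf.Variable(tf.random_normal([d]))))
qw0 = ed.models.Normal(loc=tf.Variable(tf.random_normal([1])),
                       scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
inference = ed.KLqp({w: qw, w0: qw0}, data={x: x_train, y: y_train.reshape(N)})
inference.run()

# print the posterior distributions
print([qw.loc.eval(), qw0.loc.eval()])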
Gaussian Mixture¶
# define the generative model
with inf.replicate(size=N):
    z = inf.models.Categorical(probs=p)
    x = inf.models.Normal(mu[z], sigma[z], observed=True, dim=d)

# compile and fit the model with training data
data = {x: x_train}
model.compile(infMethod="MCMC")
model.fit(data)

# print the posterior
print(model.posterior(mu))
############################## Edward ##################################################
# model definition
# prior distributions
p = ed.models.Dirichlet(concentration=tf.ones(K) / K)
mu = ed.models.Normal(0.0, 1.0, sample_shape=[K, d])
sigma = ed.models.InverseGamma(concentration=1.0, rate=1.0, sample_shape=[K, d])
# define the generative model
z = ed.models.Categorical(logits=tf.log(p) - tf.log(1.0 - p), sample_shape=N)
x = ed.models.Normal(loc=tf.gather(mu, z), scale=tf.gather(sigma, z))
# compile and fit the model with training data
qp = ed.models.Empirical(params=tf.get_variable("qp/params", [T, K],
                                                 initializer=tf.constant_initializer(1.0 / K)))
qmu = ed.models.Empirical(params=tf.get_variable("qmu/params", [T, K, d],
                                                  initializer=tf.zeros_initializer()))
qsigma = ed.models.Empirical(params=tf.get_variable("qsigma/params", [T, K, d],
                                                     initializer=tf.ones_initializer()))
qz = ed.models.Empirical(params=tf.get_variable("qz/params", [T, N],
                                                 initializer=tf.zeros_initializer(),
                                                 dtype=tf.int32))
gp = ed.models.Dirichlet(concentration=tf.ones(K))
gmu = ed.models.Normal(loc=tf.ones([K, d]), scale=tf.ones([K, d]))
gsigma = ed.models.InverseGamma(concentration=tf.ones([K, d]), rate=tf.ones([K, d]))
gz = ed.models.Categorical(logits=tf.zeros([N, K]))
inference = ed.MetropolisHastings(
    latent_vars={p: qp, mu: qmu, sigma: qsigma, z: qz},
    proposal_vars={p: gp, mu: gmu, sigma: gsigma, z: gz},
    data={x: x_train})
inference.run()
# print the posterior
print(qmu.params.eval())
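Note that qmu.params holds the whole chain of Metropolis-Hastings samples, so the statement above prints a [T, K, d] tensor. A point estimate of the component means can be obtained by averaging the samples after a burn-in period; the burn-in fraction below is an arbitrary illustrative choice:

# discard the first half of the chain as burn-in (illustrative choice) and
# average the remaining samples to obtain a point estimate of the means
burn_in = T // 2
mu_hat = tf.reduce_mean(qmu.params[burn_in:], axis=0).eval()
print(mu_hat)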
Logistic Regression¶
# define the weights
w0 = ed.models.Normal(loc=tf.zeros(1), scale=tf.ones(1))
w = ed.models.Normal(loc=tf.zeros(d), scale=tf.ones(d))
# define the generative model
x = ed.models.Normal(loc=tf.zeros([N,d]), scale=tf.ones([N,d]))
y = ed.models.Bernoulli(logits=ed.dot(x, w) + w0)
# compile and fit the model with training data
qw = ed.models.Normal(loc=tf.Variable(tf.random_normal([d])),
                      scale=tf.nn.softplus(tf.Variable(tf.random_normal([d]))))
qw0 = ed.models.Normal(loc=tf.Variable(tf.random_normal([1])),
                       scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
inference = ed.KLqp({w: qw, w0: qw0}, data={x: x_train, y: y_train.reshape(N)})
inference.run()
# print the posterior distributions
print([qw.loc.eval(), qw0.loc.eval()])
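For comparison, an InferPy version of this model, written in the style of the Bayesian linear regression section above, could look like the following sketch; it assumes that inf.models.Bernoulli accepts a logits argument, mirroring its Edward counterpart:

# InferPy equivalent (sketch)
with inf.ProbModel() as model:
    # define the weights
    w0 = inf.models.Normal(0, 1)
    w = inf.models.Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = inf.models.Normal(0, 1, observed=True, dim=d)
        y = inf.models.Bernoulli(logits=w0 + inf.dot(x, w), observed=True)

# compile and fit the model with training data
model.compile()
data = {x: x_train, y: y_train}
model.fit(data)

# print the posterior distributions
print(model.posterior(w))
print(model.posterior(w0))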
Multinomial Logistic Regression¶
# define the weights
w0 = ed.models.Normal(loc=tf.zeros(K), scale=tf.ones(K))
w = ed.models.Normal(loc=tf.zeros([K,d]), scale=tf.ones([K,d]))
# define the generative model
x = ed.models.Normal(loc=tf.zeros([N,d]), scale=tf.ones([N,d]))
y = ed.models.Normal(loc=w0 + tf.matmul(x, w, transpose_b=True), scale=tf.ones([N,K]))
# compile and fit the model with training data
qw = ed.models.Normal(loc=tf.Variable(tf.random_normal([K,d])),
                      scale=tf.nn.softplus(tf.Variable(tf.random_normal([K,d]))))
qw0 = ed.models.Normal(loc=tf.Variable(tf.random_normal([K])),
                       scale=tf.nn.softplus(tf.Variable(tf.random_normal([K]))))
inference = ed.KLqp({w: qw, w0: qw0}, data={x: x_train, y: y_train})
inference.run()
# print the posterior distributions
print([qw.loc.eval(), qw0.loc.eval()])
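Once KLqp has converged, the fitted approximations can be plugged back into the likelihood to check the model on the training data. The sketch below uses Edward's copy and evaluate utilities; the choice of mean squared error as the metric is only illustrative:

# condition on the training inputs and swap the priors over the weights
# for their fitted variational approximations
x_in = tf.cast(x_train, tf.float32)
y_post = ed.copy(y, {x: x_in, w: qw, w0: qw0})

# posterior predictive check on the training data (illustrative metric)
print(ed.evaluate('mean_squared_error', data={y_post: y_train}))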
Linear Factor Model (PCA)¶
# define the weights
w = ed.models.Normal(loc=tf.zeros([K,d]), scale=tf.ones([K,d]))
# define the generative model
z = ed.models.Normal(loc=tf.zeros([N,K]), scale=tf.ones([N,K]))
x = ed.models.Normal(loc=tf.matmul(z,w), scale=tf.ones([N,d]))
# compile and fit the model with training data
qw = ed.models.Normal(loc=tf.Variable(tf.random_normal([K,d])),
                      scale=tf.nn.softplus(tf.Variable(tf.random_normal([K,d]))))
inference = ed.KLqp({w: qw}, data={x: x_train})
inference.run()
# print the posterior distributions
print([qw.loc.eval()])
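An InferPy version of this model, in the style of the sections above, might look like the sketch below; it assumes that InferPy exposes a matmul operation analogous to the inf.dot used in the regression examples:

# InferPy equivalent (sketch)
with inf.ProbModel() as model:
    # define the weights
    with inf.replicate(size=K):
        w = inf.models.Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        z = inf.models.Normal(0, 1, dim=K)
        x = inf.models.Normal(inf.matmul(z, w), 1.0, observed=True, dim=d)

# compile and fit the model with training data
model.compile()
data = {x: x_train}
model.fit(data)

# print the posterior distributions
print(model.posterior(w))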
PCA with ARD Prior¶
# define the weights
w = ed.models.Normal(loc=tf.zeros([K,d]), scale=tf.ones([K,d]))
sigma = ed.models.InverseGamma(1.,1.)
# define the generative model
z = ed.models.Normal(loc=tf.zeros([N,K]), scale=tf.ones([N,K]))
x = ed.models.Normal(loc=tf.matmul(z,w), scale=sigma)
# compile and fit the model with training data
qw = ed.models.Normal(loc=tf.Variable(tf.random_normal([K,d])),
                      scale=tf.nn.softplus(tf.Variable(tf.random_normal([K,d]))))
inference = ed.KLqp({w: qw}, data={x: x_train})
inference.run()
# print the posterior distributions
print([qw.loc.eval()])
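As with the other models, the fitted approximation can be used for model criticism. The sketch below scores the training data under the posterior predictive; only w is swapped for its approximation, since the snippet above defines no approximations for z or sigma:

# posterior predictive over the observations, with w replaced by its fitted
# approximation (z and sigma are still drawn from their priors)
x_post = ed.copy(x, {w: qw})

# average log-likelihood of the training data under the posterior predictive
print(ed.evaluate('log_likelihood', data={x_post: x_train}))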