bnp

Some older Bayesian nonparametrics research.
Log | Files | Refs | README | LICENSE

commit 23159800bfc7b1004f7ca603635bd11ef1f5dc57
parent a671e8b25e262f8b0899c8936079c35cb886de86
Author: Jared Tobin <jared@jtobin.ca>
Date:   Wed,  9 Mar 2016 16:14:39 +1300

Add likelihood tracing to Gibbs.

Diffstat:
M finite-gaussian-mixture/src/fmm_conditional.r | 9 ++++-----
M finite-gaussian-mixture/src/fmm_generative.r | 12 ++++++++++--
2 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/finite-gaussian-mixture/src/fmm_conditional.r b/finite-gaussian-mixture/src/fmm_conditional.r @@ -71,11 +71,9 @@ conditional_location_model = function(y, z, s, l, r) { } }) - # FIXME (jtobin): check these against tim hopper's m = (yt * s + l * r) / (n * s + r) v = 1 / (n * s + r) - # FIXME (jtobin): check this mapply(rnorm, 1, m, v) } @@ -110,12 +108,12 @@ conditional_precision_model = function(y, z, m, b, w) { } inverse_model = function(n, k, y, a, l, r, b, w) { - kernel = function(p0, m0, s0) { + gibbs = function(p0, m0, s0) { z = conditional_label_model(y, p0, m0, s0) p1 = conditional_mixing_model(y, k, z, a) m1 = conditional_location_model(y, z, s0, l, r) s1 = conditional_precision_model(y, z, m1, b, w) - list(p = p1, m = m1, s = s1) + list(p = p1, m = m1, s = s1, l = lmodel(y, z, p1, m1, s1)) } p0 = mixing_model(k, a) @@ -125,10 +123,11 @@ inverse_model = function(n, k, y, a, l, r, b, w) { acc = params for (j in seq(n - 1)) { - params = kernel(params$p, params$m, params$s) + params = gibbs(params$p, params$m, params$s) acc$p = rbind(acc$p, params$p) acc$m = rbind(acc$m, params$m) acc$s = rbind(acc$s, params$s) + acc$l = c(acc$l, params$l) } acc } diff --git a/finite-gaussian-mixture/src/fmm_generative.r b/finite-gaussian-mixture/src/fmm_generative.r @@ -12,12 +12,20 @@ parameter_model = function(k, n) { mu = location_model(k, 0, 0.1) s = precision_model(k, 1, 1) list(c, mu, s) - } +} data_model = function(config) { sampler = function(y, m, s) rnorm(y, m, 1 / s) mapply(sampler, config[[1]], config[[2]], config[[3]]) - } +} model = function(k, n) parameter_model(k, n) %>% data_model +lmodel = function(y, z, p, m, t) { + clustered = data.frame(value = y, L1 = z) + cluster = clustered$L1 + score = log(p[cluster]) + + dnorm(clustered$value, m[cluster], sqrt(1 / p[cluster]), log = T) + sum(score) +} +