model {
   for (i in 1:N) {
      y[i] ~ dnorm(mu[i], tau)         # each observation comes from its group's normal
      mu[i] <- lambda[T[i]]            # group mean selected by the latent label
      T[i] ~ dcat(p[])                 # latent group membership
   }
   p[1:2] ~ ddirch(alpha[])            # mixture weights
   alpha[1] <- 1
   alpha[2] <- 1
   theta ~ dunif(0, 1000)              # positive offset, so lambda[2] > lambda[1]
   lambda[2] <- lambda[1] + theta      # ordering constraint prevents label switching
   lambda[1] ~ dunif(-1000, 1000)
   sigma ~ dunif(0, 100)
   tau <- 1 / pow(sigma, 2)            # common precision for both components

   # generate a prediction from this model
   T.pred ~ dcat(p[])
   y.pred ~ dnorm(lambda[T.pred], tau)
}


Data:
list(y = c(529.0, 530.0, 532.0, 533.1, 533.4, 533.6, 533.7, 534.1, 534.8, 535.3,
535.4, 535.9, 536.1, 536.3, 536.4, 536.6, 537.0, 537.4, 537.5, 538.3,
538.5, 538.6, 539.4, 539.6, 540.4, 540.8, 542.0, 542.8, 543.0, 543.5,
543.8, 543.9, 545.3, 546.2, 548.8, 548.7, 548.9, 549.0, 549.4, 549.9,
550.6, 551.2, 551.4, 551.5, 551.6, 552.8, 552.9, 553.2), N = 48,
   T = c(1, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,         
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, 2))
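
As an aside (not part of the BUGS workflow), the same data can be run through a maximum-likelihood Gaussian mixture with a shared variance as a rough cross-check of the posterior summaries reported below. A minimal sketch using scikit-learn's GaussianMixture; the sorted component means, common standard deviation, and weights should land near lambda[1], lambda[2], sigma, and p:

import numpy as np
from sklearn.mixture import GaussianMixture

# The 48 measurements from the data list above.
y = np.array([529.0, 530.0, 532.0, 533.1, 533.4, 533.6, 533.7, 534.1, 534.8, 535.3,
              535.4, 535.9, 536.1, 536.3, 536.4, 536.6, 537.0, 537.4, 537.5, 538.3,
              538.5, 538.6, 539.4, 539.6, 540.4, 540.8, 542.0, 542.8, 543.0, 543.5,
              543.8, 543.9, 545.3, 546.2, 548.8, 548.7, 548.9, 549.0, 549.4, 549.9,
              550.6, 551.2, 551.4, 551.5, 551.6, 552.8, 552.9, 553.2])

# Two components with a shared ("tied") variance, mirroring the single tau in the model.
gmm = GaussianMixture(n_components=2, covariance_type="tied", random_state=0)
gmm.fit(y.reshape(-1, 1))

print("component means:", np.sort(gmm.means_.ravel()))        # compare with lambda[1], lambda[2]
print("common sd:      ", np.sqrt(gmm.covariances_).item())   # compare with sigma
print("mixture weights:", gmm.weights_)                       # compare with p[1], p[2]

This is an EM point estimate, not the Bayesian fit, so it gives no interval summaries and ignores the priors; it is only a sanity check on the location of the two components.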


Inits:
list(lambda = c(535, NA), theta = 5, sigma = 10)
(lambda[2] is given as NA because it is a logical node, defined in the model as lambda[1] + theta, and so is not initialised directly.)

   node        mean     sd        MC error   2.5%     median   97.5%    start   sample
   p[1]        0.5991   0.09407   9.422E-4   0.41     0.6031   0.767    1001    81000
   p[2]        0.4009   0.09407   9.422E-4   0.2331   0.3969   0.5901   1001    81000
   lambda[1]   536.8    1.046     0.01226    535.0    536.8    539.1    1001    81000
   lambda[2]   548.8    1.486     0.01868    545.2    548.9    551.3    1001    81000
   sigma       3.904    0.7723    0.01179    2.964    3.741    6.218    1001    81000
   theta       11.97    1.745     0.02587    7.73     12.17    14.5     1001    81000
   
The figure below shows the posterior probability that each observation belongs to mixture group 2. To obtain this plot, set a monitor on node T before updating, then select Inference -> Compare..., enter T in the node field and y in the axis field, and choose the scatterplot option.
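
The same probabilities can also be computed directly from the monitored samples of T outside the GUI. A minimal sketch, assuming the draws have been exported to a whitespace-delimited file T_samples.txt with one iteration per row and one column per T[i] (the file name and layout are assumptions, not part of the example):

import numpy as np

# Monitored draws of T: shape (n_iterations, N), entries are the labels 1 or 2.
T_samples = np.loadtxt("T_samples.txt")

# Posterior probability that observation i belongs to group 2 =
# fraction of MCMC iterations in which its label equals 2.
p_group2 = (T_samples == 2).mean(axis=0)

for i, p in enumerate(p_group2, start=1):
    print(f"Pr(T[{i}] = 2) = {p:.3f}")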

[example-11_6_1-eyes0]

[example-11_6_1-eyes1] [example-11_6_1-eyes2]
[example-11_6_1-eyes3] [example-11_6_1-eyes4]