/*
 * ForwardBackward.java
 */
package aima.core.probability.temporal.generic;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import aima.core.probability.CategoricalDistribution;
import aima.core.probability.FiniteProbabilityModel;
import aima.core.probability.RandomVariable;
import aima.core.probability.proposition.AssignmentProposition;
import aima.core.probability.proposition.Proposition;
import aima.core.probability.temporal.ForwardBackwardInference;
import aima.core.probability.util.ProbUtil;
import aima.core.probability.util.ProbabilityTable;
import aima.core.probability.util.RandVar;
/**
* Artificial Intelligence A Modern Approach (3rd Edition): page 576.<br>
* <br>
*
* <pre>
* function FORWARD-BACKWARD(ev, prior) returns a vector of probability distributions
* inputs: ev, a vector of evidence values for steps 1,...,t
* prior, the prior distribution on the initial state, <b>P</b>(X<sub>0</sub>)
* local variables: fv, a vector of forward messages for steps 0,...,t
* b, a representation of the backward message, initially all 1s
* sv, a vector of smoothed estimates for steps 1,...,t
*
* fv[0] <- prior
* for i = 1 to t do
* fv[i] <- FORWARD(fv[i-1], ev[i])
* for i = t downto 1 do
* sv[i] <- NORMALIZE(fv[i] * b)
* b <- BACKWARD(b, ev[i])
* return sv
* </pre>
*
* Figure 15.4 The forward-backward algorithm for smoothing: computing posterior
* probabilities of a sequence of states given a sequence of observations. The
* FORWARD and BACKWARD operators are defined by Equations (15.5) and (15.9),
* respectively.<br>
* <br>
* <b>Note:</b> An implementation of the FORWARD-BACKWARD algorithm using the
* general purpose probability APIs, i.e. the underlying model implementation is
* abstracted away.
*
* @author Ciaran O'Reilly
*/
public class ForwardBackward implements ForwardBackwardInference {
	// The transition model P(X_t | X_t-1).
	private FiniteProbabilityModel transitionModel = null;
	// Maps each time-t state variable X_t to its time t-1 counterpart X_t-1,
	// so that propositions spanning both time slices can be constructed.
	private Map<RandomVariable, RandomVariable> tToTm1StateVarMap = new HashMap<RandomVariable, RandomVariable>();
	// The sensor model P(E_t | X_t).
	private FiniteProbabilityModel sensorModel = null;

	/**
	 * Instantiate an instance of the Forward Backward algorithm.
	 *
	 * @param transitionModel
	 *            the transition model.
	 * @param tToTm1StateVarMap
	 *            a map from the X<sub>t</sub> random variables in the
	 *            transition model to the X<sub>t-1</sub> random variables.
	 * @param sensorModel
	 *            the sensor model.
	 */
	public ForwardBackward(FiniteProbabilityModel transitionModel,
			Map<RandomVariable, RandomVariable> tToTm1StateVarMap,
			FiniteProbabilityModel sensorModel) {
		this.transitionModel = transitionModel;
		// Defensive copy: later external mutation of the passed-in map
		// cannot affect this instance.
		this.tToTm1StateVarMap.putAll(tToTm1StateVarMap);
		this.sensorModel = sensorModel;
	}

	//
	// START-ForwardBackwardInference

	// function FORWARD-BACKWARD(ev, prior) returns a vector of probability
	// distributions
	/**
	 * The FORWARD-BACKWARD smoothing algorithm (AIMA3e Figure 15.4):
	 * computes the posterior distribution over each state given the complete
	 * evidence sequence.
	 *
	 * @param ev
	 *            evidence values for time steps 1,...,t (ev.get(0) holds
	 *            e<sub>1</sub>).
	 * @param prior
	 *            the prior distribution on the initial state,
	 *            <b>P</b>(X<sub>0</sub>).
	 * @return the smoothed estimates <b>P</b>(X<sub>k</sub> |
	 *         e<sub>1:t</sub>) for k = 1,...,t, in increasing time order.
	 */
	@Override
	public List<CategoricalDistribution> forwardBackward(
			List<List<AssignmentProposition>> ev, CategoricalDistribution prior) {
		// local variables: fv, a vector of forward messages for steps 0,...,t
		List<CategoricalDistribution> fv = new ArrayList<CategoricalDistribution>(
				ev.size() + 1);
		// b, a representation of the backward message, initially all 1s
		CategoricalDistribution b = initBackwardMessage();
		// sv, a vector of smoothed estimates for steps 1,...,t
		List<CategoricalDistribution> sv = new ArrayList<CategoricalDistribution>(
				ev.size());

		// fv[0] <- prior
		fv.add(prior);
		// for i = 1 to t do
		// (ev is 0-based: ev.get(i) is e_{i+1}, so fv index i+1 is written.)
		for (int i = 0; i < ev.size(); i++) {
			// fv[i] <- FORWARD(fv[i-1], ev[i])
			fv.add(forward(fv.get(i), ev.get(i)));
		}
		// for i = t downto 1 do
		for (int i = ev.size() - 1; i >= 0; i--) {
			// sv[i] <- NORMALIZE(fv[i] * b)
			// Inserting at position 0 keeps sv in increasing time order even
			// though this loop runs backwards through the evidence.
			sv.add(0, fv.get(i + 1).multiplyBy(b).normalize());
			// b <- BACKWARD(b, ev[i])
			b = backward(b, ev.get(i));
		}

		// return sv
		return sv;
	}

	/**
	 * The FORWARD operator (AIMA3e Equation 15.5):<br>
	 * <b>P</b>(X<sub>t+1</sub> | e<sub>1:t+1</sub>) = &alpha;
	 * <b>P</b>(e<sub>t+1</sub> | X<sub>t+1</sub>)
	 * &sum;<sub>x<sub>t</sub></sub> <b>P</b>(X<sub>t+1</sub> |
	 * x<sub>t</sub>) P(x<sub>t</sub> | e<sub>1:t</sub>)
	 *
	 * @param f1_t
	 *            the current forward message, P(X<sub>t</sub> |
	 *            e<sub>1:t</sub>).
	 * @param e_tp1
	 *            the next evidence assignments, e<sub>t+1</sub>.
	 * @return the updated (normalized) forward message for t+1.
	 */
	@Override
	public CategoricalDistribution forward(CategoricalDistribution f1_t,
			List<AssignmentProposition> e_tp1) {
		// s1 accumulates the one-step prediction; starts all zeros.
		final CategoricalDistribution s1 = new ProbabilityTable(f1_t.getFor());
		// Set up required working variables
		// Proposition over all the X_{t+1} state variables.
		Proposition[] props = new Proposition[s1.getFor().size()];
		int i = 0;
		for (RandomVariable rv : s1.getFor()) {
			props[i] = new RandVar(rv.getName(), rv.getDomain());
			i++;
		}
		final Proposition Xtp1 = ProbUtil.constructConjunction(props);

		// Dummy-valued assignments for the X_t variables; the values are
		// overwritten for each possible world during iteration below.
		final AssignmentProposition[] xt = new AssignmentProposition[tToTm1StateVarMap
				.size()];
		final Map<RandomVariable, AssignmentProposition> xtVarAssignMap = new HashMap<RandomVariable, AssignmentProposition>();
		i = 0;
		for (RandomVariable rv : tToTm1StateVarMap.keySet()) {
			xt[i] = new AssignmentProposition(tToTm1StateVarMap.get(rv),
					"<Dummy Value>");
			xtVarAssignMap.put(rv, xt[i]);
			i++;
		}

		// Step 1: Calculate the 1 time step prediction
		// &sum;<sub>x<sub>t</sub></sub>
		CategoricalDistribution.Iterator if1_t = new CategoricalDistribution.Iterator() {
			public void iterate(Map<RandomVariable, Object> possibleWorld,
					double probability) {
				// <b>P</b>(X<sub>t+1</sub> | x<sub>t</sub>)*
				// P(x<sub>t</sub> | e<sub>1:t</sub>)
				// Bind the current possible world's values onto the x_t
				// assignment propositions (mutated in place).
				for (Map.Entry<RandomVariable, Object> av : possibleWorld
						.entrySet()) {
					xtVarAssignMap.get(av.getKey()).setValue(av.getValue());
				}
				// NOTE(review): the index-paired accumulation below assumes
				// the value ordering of the transition model's posterior
				// distribution matches s1's ordering (both are over the same
				// X_{t+1} variables) - confirm against the
				// FiniteProbabilityModel/ProbabilityTable implementations.
				int i = 0;
				for (double tp : transitionModel
						.posteriorDistribution(Xtp1, xt).getValues()) {
					s1.setValue(i, s1.getValues()[i] + (tp * probability));
					i++;
				}
			}
		};
		f1_t.iterateOver(if1_t);

		// Step 2: multiply by the probability of the evidence
		// and normalize
		// <b>P</b>(e<sub>t+1</sub> | X<sub>t+1</sub>)
		CategoricalDistribution s2 = sensorModel.posteriorDistribution(ProbUtil
				.constructConjunction(e_tp1.toArray(new Proposition[e_tp1
						.size()])), Xtp1);

		return s2.multiplyBy(s1).normalize();
	}

	/**
	 * The BACKWARD operator (AIMA3e Equation 15.9):<br>
	 * <b>P</b>(e<sub>k+1:t</sub> | X<sub>k</sub>) =
	 * &sum;<sub>x<sub>k+1</sub></sub> P(e<sub>k+1</sub> | x<sub>k+1</sub>)
	 * P(e<sub>k+2:t</sub> | x<sub>k+1</sub>) <b>P</b>(x<sub>k+1</sub> |
	 * X<sub>k</sub>)
	 *
	 * @param b_kp2t
	 *            the current backward message, P(e<sub>k+2:t</sub> |
	 *            X<sub>k+1</sub>).
	 * @param e_kp1
	 *            the evidence assignments for step k+1.
	 * @return the updated backward message (unnormalized, as in the text).
	 */
	@Override
	public CategoricalDistribution backward(CategoricalDistribution b_kp2t,
			List<AssignmentProposition> e_kp1) {
		// b_kp1t accumulates the new backward message; starts all zeros.
		final CategoricalDistribution b_kp1t = new ProbabilityTable(
				b_kp2t.getFor());
		// Set up required working variables
		// Proposition over the X_k variables, obtained by mapping each X_{k+1}
		// variable to its previous-time-slice counterpart.
		Proposition[] props = new Proposition[b_kp1t.getFor().size()];
		int i = 0;
		for (RandomVariable rv : b_kp1t.getFor()) {
			RandomVariable prv = tToTm1StateVarMap.get(rv);
			props[i] = new RandVar(prv.getName(), prv.getDomain());
			i++;
		}
		final Proposition Xk = ProbUtil.constructConjunction(props);

		// Dummy-valued assignments for x_{k+1}; overwritten per possible
		// world during iteration below.
		final AssignmentProposition[] ax_kp1 = new AssignmentProposition[tToTm1StateVarMap
				.size()];
		final Map<RandomVariable, AssignmentProposition> x_kp1VarAssignMap = new HashMap<RandomVariable, AssignmentProposition>();
		i = 0;
		for (RandomVariable rv : b_kp1t.getFor()) {
			ax_kp1[i] = new AssignmentProposition(rv, "<Dummy Value>");
			x_kp1VarAssignMap.put(rv, ax_kp1[i]);
			i++;
		}
		final Proposition x_kp1 = ProbUtil.constructConjunction(ax_kp1);

		// Conjunction of the evidence assignments e_{k+1}.
		props = new Proposition[e_kp1.size()];
		final Proposition pe_kp1 = ProbUtil.constructConjunction(e_kp1
				.toArray(props));

		// &sum;<sub>x<sub>k+1</sub></sub>
		CategoricalDistribution.Iterator ib_kp2t = new CategoricalDistribution.Iterator() {
			public void iterate(Map<RandomVariable, Object> possibleWorld,
					double probability) {
				// Assign current values for x<sub>k+1</sub>
				for (Map.Entry<RandomVariable, Object> av : possibleWorld
						.entrySet()) {
					x_kp1VarAssignMap.get(av.getKey()).setValue(av.getValue());
				}
				// P(e<sub>k+1</sub> | x<sub>k+1</sub>)
				// P(e<sub>k+2:t</sub> | x<sub>k+1</sub>)
				// (probability here is the incoming backward-message value
				// for this x_{k+1}, not a normalized probability.)
				double p = sensorModel.posterior(pe_kp1, x_kp1) * probability;
				// <b>P</b>(x<sub>k+1</sub> | X<sub>k</sub>)
				// NOTE(review): as in forward(), the index pairing assumes
				// the posterior distribution's value ordering matches
				// b_kp1t's - confirm against the model implementation.
				int i = 0;
				for (double tp : transitionModel.posteriorDistribution(x_kp1,
						Xk).getValues()) {
					b_kp1t.setValue(i, b_kp1t.getValues()[i] + (tp * p));
					i++;
				}
			}
		};
		b_kp2t.iterateOver(ib_kp2t);

		return b_kp1t;
	}

	// END-ForwardBackwardInference
	//

	//
	// PRIVATE METHODS
	//

	/**
	 * Create the initial backward message: a table over the time-t state
	 * variables with every entry set to 1 (the "all 1s" b of Figure 15.4).
	 *
	 * @return the initial (uniform 1s) backward message.
	 */
	private CategoricalDistribution initBackwardMessage() {
		ProbabilityTable b = new ProbabilityTable(tToTm1StateVarMap.keySet());

		for (int i = 0; i < b.size(); i++) {
			b.setValue(i, 1.0);
		}

		return b;
	}
}