Merge pull request #669 from p2t2/Dev4.1-NewChain
Dev4.1 new chain
mreposa committed Jan 13, 2017
2 parents 290abb5 + bd743ca commit d3df25c
Showing 144 changed files with 7,824 additions and 1,140 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -20,3 +20,6 @@ project/plugins/project/
.classpath
.project
/bin/

#IntelliJ
.idea/
1 change: 1 addition & 0 deletions Figaro/.gitignore
@@ -1,3 +1,4 @@
/bin/
/.cache-main
/.cache-tests
/build.properties
8 changes: 4 additions & 4 deletions Figaro/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
Bundle-ManifestVersion: 2
Bundle-Name: Figaro
Bundle-SymbolicName: com.cra.figaro
Bundle-Version: 4.0.0
Bundle-Version: 4.1.0
Export-Package: com.cra.figaro.algorithm,
com.cra.figaro.algorithm.decision,
com.cra.figaro.algorithm.decision.index,
@@ -13,16 +13,16 @@ Export-Package: com.cra.figaro.algorithm,
com.cra.figaro.language,
com.cra.figaro.library.atomic.continuous,
com.cra.figaro.library.atomic.discrete,
com.cra.figaro.library.collection,
com.cra.figaro.library.compound,
com.cra.figaro.library.decision,
com.cra.figaro.util
Bundle-Vendor: Charles River Analytics
Bundle-RequiredExecutionEnvironment: JavaSE-1.6
Require-Bundle: org.scala-lang.scala-library,
org.scala-lang.scala-reflect,
com.typesafe.akka.actor;bundle-version="2.3.3",
com.typesafe.config;bundle-version="1.2.1",
org.scalatest;bundle-version="2.1.6"
com.typesafe.config,
com.typesafe.akka.actor
Import-Package: org.apache.commons.math3.distribution;version="3.3.0"


2 changes: 1 addition & 1 deletion Figaro/figaro_build.properties
@@ -1 +1 @@
version=4.0.0.0
version=4.1.0.0
@@ -126,6 +126,18 @@ trait BaseProbQueryAlgorithm[U[_]]
doExpectation(target, function)
}

/**
* Return an estimate of the expectation of the function under the marginal probability distribution
* of the target.
* Throws NotATargetException if called on a target that is not in the list of
* targets of the algorithm.
* Throws AlgorithmInactiveException if the algorithm is inactive.
*/
def expectation[T](target: U[T])(function: T => Double, c: Any = DummyImplicit): Double = {
check(target)
doExpectation(target, function)
}

/**
* Return the mean of the probability density function for the given continuous element.
*/
@@ -154,6 +166,18 @@ trait BaseProbQueryAlgorithm[U[_]]
doProbability(target, predicate)
}

/**
* Return an estimate of the probability of the predicate under the marginal probability distribution
* of the target.
* Throws NotATargetException if called on a target that is not in the list of
* targets of the algorithm.
* Throws AlgorithmInactiveException if the algorithm is inactive.
*/
def probability[T](target: U[T])(predicate: T => Boolean, c: Any = DummyImplicit): Double = {
probability(target, predicate)
}


/**
* Return an estimate of the probability that the target produces the value.
* Throws NotATargetException if called on a target that is not in the list of
@@ -165,3 +189,12 @@ trait BaseProbQueryAlgorithm[U[_]]
doProbability(target, (t: T) => t == value)
}
}


trait StreamableProbQueryAlgorithm extends ProbQueryAlgorithm {
/**
* Sample a value from the posterior of this element.
*/
def sampleFromPosterior[T](element: Element[T]): Stream[T]
}
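
The two curried overloads and the new StreamableProbQueryAlgorithm trait are the user-facing query additions in this file. A minimal sketch of how they might be called, assuming Figaro's Importance sampler and an illustrative Normal element (neither is part of this diff):

import com.cra.figaro.language._
import com.cra.figaro.library.atomic.continuous.Normal
import com.cra.figaro.algorithm.sampling.Importance

object CurriedQueryExample {
  def main(args: Array[String]): Unit = {
    val x = Normal(0.0, 1.0)
    val alg = Importance(10000, x)
    alg.start()
    // Curried style: the target in the first argument list, the query in the second
    val pPositive = alg.probability(x)((d: Double) => d > 0.0)
    val secondMoment = alg.expectation(x)((d: Double) => d * d)
    println(s"P(x > 0) ~= $pPositive, E[x^2] ~= $secondMoment")
    // If the chosen algorithm mixes in StreamableProbQueryAlgorithm (an
    // assumption about the samplers in this release), posterior draws
    // arrive as a lazy stream:
    // val draws = alg.sampleFromPosterior(x).take(5).toList
    alg.kill()
  }
}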

@@ -137,7 +137,7 @@ class ProbQueryVariableEliminationDecision[T, U](override val universe: Universe
*
*/
private def marginalizeToTarget(factor: Factor[(Double, Double)], target: Element[_]): Unit = {
val unnormalizedTargetFactor = factor.marginalizeTo(semiring, Variable(target))
val unnormalizedTargetFactor = factor.marginalizeTo(Variable(target))
val z = unnormalizedTargetFactor.foldLeft(semiring.zero, (x: (Double, Double), y: (Double, Double)) => (x._1 + y._1, 0.0))
//val targetFactor = Factory.make[(Double, Double)](unnormalizedTargetFactor.variables)
val targetFactor = unnormalizedTargetFactor.mapTo((d: (Double, Double)) => (d._1 / z._1, d._2))
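In both this class and ProbQueryVariableElimination further down, marginalizeTo no longer takes the semiring as an argument, presumably because the factor now carries its own semiring. A sketch of the resulting call shape for the plain-probability case, with illustrative names:

// Marginalize the joint factor down to the query variable, then normalize.
val unnormalized = jointFactor.marginalizeTo(Variable(target))
val z = unnormalized.foldLeft(0.0, _ + _)
val marginal = unnormalized.mapTo((d: Double) => d / z)
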
@@ -41,7 +41,7 @@ trait FactoredAlgorithm[T] extends Algorithm {
* If any of these elements has * in its range, the lower and upper bounds of factors will be different, so we need to compute both.
* If they don't, we don't need to compute bounds.
*/
def getNeededElements(starterElements: List[Element[_]], depth: Int): (List[Element[_]], Boolean) = {
def getNeededElements(starterElements: List[Element[_]], depth: Int, parameterized: Boolean = false): (List[Element[_]], Boolean) = {
// Since there may be evidence on the dependent universes, we have to include their parents as important elements
val dependentUniverseParents =
for {
@@ -63,7 +63,7 @@ trait FactoredAlgorithm[T] extends Algorithm {
}
// Make sure we compute values from scratch in case the elements have changed
LazyValues.clear(universe)
val values = LazyValues(universe)
val values = LazyValues(universe, parameterized)

/*
* Beginning with the given element at the given depth, find all elements that the given element is used by within the depth.
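The new parameterized flag is forwarded straight to LazyValues, so each caller decides whether parameterized elements are expanded. A hedged sketch of a call site inside a factored algorithm (the query element is illustrative):

// Expand the model to unbounded depth. `usesStar` is true when some element
// has * in its range, in which case lower and upper bound factors both apply.
val (needed, usesStar) =
  getNeededElements(List(queryTarget), Int.MaxValue, parameterized = true)
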
@@ -24,13 +24,21 @@ import com.cra.figaro.util.MapResampler
import com.cra.figaro.algorithm.factored.factors.Factor

/**
* Class to handle sampling from continuous elements in PBP
* @param argSamples Maximum number of samples to take from atomic elements
* @param totalSamples Maximum number of samples on the output of chains
* Class to handle sampling from continuous elements to make factors
* @param numSamplesFromAtomics Maximum number of samples to take from atomic elements
* @param maxNumSamplesAtChain Maximum number of samples on the output of chains
* @param de An instance used to compute the density estimate of a point during resampling
*/
class ParticleGenerator(de: DensityEstimator, val numArgSamples: Int, val numTotalSamples: Int) {
class ParticleGenerator(de: DensityEstimator, val numSamplesFromAtomics: Int, val maxNumSamplesAtChain: Int) {

@deprecated("numArgSamples is deprecated. Please use numSamplesFromAtomics", "4.1")
val numArgSamples = numSamplesFromAtomics

@deprecated("numTotalSamples is deprecated. Please use maxNumSamplesAtChain", "4.1")
val numTotalSamples = maxNumSamplesAtChain

var warningIssued = false

// Caches the samples for an element
private val sampleMap = Map[Element[_], (List[(Double, _)], Int)]()

@@ -52,7 +60,7 @@ class ParticleGenerator(de: DensityEstimator, val numArgSamples: Int, val numTot
/**
* Retrieves the samples for an element using the default number of samples.
*/
def apply[T](elem: Element[T]): List[(Double, T)] = apply(elem, numArgSamples)
def apply[T](elem: Element[T]): List[(Double, T)] = apply(elem, numSamplesFromAtomics)

/**
* Retrieves the samples for an element using the indicated number of samples
@@ -63,6 +71,10 @@ class ParticleGenerator(de: DensityEstimator, val numArgSamples: Int, val numTot
e.asInstanceOf[(List[(Double, T)], Int)]._1
}
case None => {
if (!warningIssued) {
println("Warning: you are using a factored algorithm with continuous or infinite elements. The element will be sampled " + numSamples + " times")
warningIssued = true
}
val sampler = ElementSampler(elem, numSamples)
sampler.start
val result = sampler.computeDistribution(elem).toList
@@ -86,9 +98,9 @@ class ParticleGenerator(de: DensityEstimator, val numArgSamples: Int, val numTot
def nextDouble(d: Double) = random.nextGaussian() * proposalVariance + d

val numSamples = sampleMap(elem)._2

val sampleDensity: Double = 1.0 / numSamples

// Generate new samples given the old samples for an element
val newSamples = elem match {
/* If the element is an instance of OneShifter (Geometric, Poisson, etc.),
@@ -97,7 +109,7 @@ class ParticleGenerator(de: DensityEstimator, val numArgSamples: Int, val numTot
case o: OneShifter => {
val toResample = if (beliefs.size < numSamples) {
val resampler = new MapResampler(beliefs.map(s => (s._1, s._2)))
List.fill(numSamples)(1.0/numSamples, resampler.resample)
List.fill(numSamples)(1.0 / numSamples, resampler.resample)
} else {
beliefs
}
@@ -118,7 +130,7 @@ class ParticleGenerator(de: DensityEstimator, val numArgSamples: Int, val numTot
// return the new particles
samples.groupBy(_._2).toList.map(s => (s._2.unzip._1.sum, s._2.head._2))
}

/* For atomic doubles, we do the same thing as the OneShifters, but we assume
* that we never need to resample since the number of particles equals numSamples.
* We propose a new double and check its acceptance. Note the proposal is symmetric.
@@ -146,9 +158,9 @@ class ParticleGenerator(de: DensityEstimator, val numArgSamples: Int, val numTot
* we estimate the density using the density estimator, then multiply all of the estimates together. Finally, since
* we only sample atomic elements, we multiply each result by the density of the values in the original element
*/
private def accept[T](elem: Atomic[_], oldValue: T, newValue: T, proposalProb: Double, beliefs: List[List[(Double, T)]]): T = {
val oldDensity = beliefs.map(de.getDensity(oldValue, _)).product*elem.asInstanceOf[Atomic[T]].density(oldValue)
val newDensity = beliefs.map(de.getDensity(newValue, _)).product*elem.asInstanceOf[Atomic[T]].density(newValue)
private def accept[T](elem: Atomic[_], oldValue: T, newValue: T, proposalProb: Double, beliefs: List[List[(Double, T)]]): T = {
val oldDensity = beliefs.map(de.getDensity(oldValue, _)).product * elem.asInstanceOf[Atomic[T]].density(oldValue)
val newDensity = beliefs.map(de.getDensity(newValue, _)).product * elem.asInstanceOf[Atomic[T]].density(newValue)
val ratio = (newDensity / oldDensity) * proposalProb

val nextValue = if (ratio > 1) {
@@ -164,12 +176,18 @@ object ParticleGenerator {
/**
* Maximum number of particles to generate per atomic
*/
var defaultArgSamples = 15
var defaultNumSamplesFromAtomics = 15

@deprecated("defaultArgSamples is deprecated. Please use defaultNumSamplesFromAtomics", "4.1")
var defaultArgSamples = defaultNumSamplesFromAtomics

/**
* Maximum number of particles to generate through a chain.
*/
var defaultTotalSamples = 15
var defaultMaxNumSamplesAtChain = 15

@deprecated("defaultTotalSamples is deprecated. Please use defaultMaxNumSamplesAtChain", "4.1")
var defaultTotalSamples = defaultMaxNumSamplesAtChain

private val samplerMap: Map[Universe, ParticleGenerator] = Map()

@@ -186,11 +204,11 @@ object ParticleGenerator {
/**
* Create a new particle generator for the given universe, using the given density estimator, number of samples from atomics, and maximum number of samples at chains
*/
def apply(univ: Universe, de: DensityEstimator, numArgSamples: Int, numTotalSamples: Int): ParticleGenerator =
def apply(univ: Universe, de: DensityEstimator, numSamplesFromAtomics: Int, maxNumSamplesAtChain: Int): ParticleGenerator =
samplerMap.get(univ) match {
case Some(e) => e
case None => {
samplerMap += (univ -> new ParticleGenerator(de, numArgSamples, numTotalSamples))
samplerMap += (univ -> new ParticleGenerator(de, numSamplesFromAtomics, maxNumSamplesAtChain))
univ.registerUniverse(samplerMap)
samplerMap(univ)
}
@@ -200,7 +218,13 @@ object ParticleGenerator {
* Create a new particle generator for a universe using a constant density estimator and the default numbers of samples
*/
def apply(univ: Universe): ParticleGenerator = apply(univ, new ConstantDensityEstimator,
defaultArgSamples, defaultTotalSamples)
defaultNumSamplesFromAtomics, defaultMaxNumSamplesAtChain)

/**
* Create a new particle generator for a universe using a constant density estimator and the given number of samples from atomics and maximum number of samples at chains
*/
def apply(univ: Universe, numSamplesFromAtomics: Int, maxNumSamplesAtChain: Int): ParticleGenerator = apply(univ, new ConstantDensityEstimator,
numSamplesFromAtomics, maxNumSamplesAtChain)

/**
* Check if a particle generator exists for this universe
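With the renames, the sampling budgets can be set globally on the companion object, or per universe through the new three-argument apply above. A sketch under those assumptions (imports for ParticleGenerator's own package omitted):

import com.cra.figaro.language.Universe

// Global defaults (the old names defaultArgSamples/defaultTotalSamples are deprecated)
ParticleGenerator.defaultNumSamplesFromAtomics = 50
ParticleGenerator.defaultMaxNumSamplesAtChain = 25

// Or configure one universe explicitly; this overload plugs in a ConstantDensityEstimator
val pg = ParticleGenerator(Universe.universe,
  numSamplesFromAtomics = 50, maxNumSamplesAtChain = 25)
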
@@ -48,7 +48,7 @@ class VEGraph private (val info: Map[Variable[_], VariableInfo]) {
* variables appearing in a factor with the eliminated variable, and excludes all factors in which the
* eliminated variable appears.
*/
def eliminate(variable: Variable[_]): VEGraph = {
def eliminate(variable: Variable[_]): (VEGraph, Double) = {
val VariableInfo(oldFactors, allVars) = info(variable)
val newFactor = AbstractFactor((allVars - variable).toList)
var newInfo = VEGraph.makeInfo(info, List(newFactor), oldFactors)
@@ -57,7 +57,7 @@ class VEGraph private (val info: Map[Variable[_], VariableInfo]) {
newInfo += neighbor -> VariableInfo(oldNeighborFactors, oldNeighborNeighbors - variable)
}
newInfo(variable).neighbors foreach (removeNeighbor(_))
(new VEGraph(newInfo))
(new VEGraph(newInfo), VEGraph.cost(newFactor))
}

/**
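Since eliminate now returns the cost of the factor it creates along with the updated graph, a caller can fold over an ordering and accumulate the total cost. A small sketch with illustrative names:

def totalEliminationCost(start: VEGraph, order: List[Variable[_]]): Double =
  order.foldLeft((start, 0.0)) { case ((g, acc), v) =>
    val (next, cost) = g.eliminate(v)
    (next, acc + cost)
  }._2
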
@@ -222,7 +222,7 @@ class ProbQueryVariableElimination(override val universe: Universe, targets: Ele
val semiring = SumProductSemiring()

private def marginalizeToTarget(factor: Factor[Double], target: Element[_]): Unit = {
val unnormalizedTargetFactor = factor.marginalizeTo(semiring.asInstanceOf[Semiring[Double]], Variable(target))
val unnormalizedTargetFactor = factor.marginalizeTo(Variable(target))
val z = unnormalizedTargetFactor.foldLeft(semiring.zero, _ + _)
//val targetFactor = Factory.make[Double](unnormalizedTargetFactor.variables)
val targetFactor = unnormalizedTargetFactor.mapTo((d: Double) => d / z)
@@ -269,29 +269,32 @@ object VariableElimination {
* minimizes the number of extra factor entries that would be created when it is eliminated.
* Override this method if you want a different rule.
*
* Returns the score of the ordering as well as the ordering.
* Returns the score of the ordering as well as the ordering. If useBestScore is set to false, it returns the total score of the
* entire elimination operation.
*/
def eliminationOrder[T](factors: Traversable[Factor[T]], toPreserve: Traversable[Variable[_]]): (Double, List[Variable[_]]) = {
def eliminationOrder[T](factors: Traversable[Factor[T]], toPreserve: Traversable[Variable[_]], useBestScore: Boolean = true): (Double, List[Variable[_]]) = {
val eliminableVars = (Set[Variable[_]]() /: factors)(_ ++ _.variables) -- toPreserve
var initialGraph = new VEGraph(factors)
val candidates = new HeapPriorityMap[Variable[_], Double]
eliminableVars foreach (v => candidates += v -> initialGraph.score(v))
eliminationOrderHelper(candidates, toPreserve, initialGraph, Double.NegativeInfinity, List())
val initScore = if (useBestScore) Double.NegativeInfinity else 0.0
eliminationOrderHelper(candidates, toPreserve, initialGraph, initScore, List(), useBestScore)
}

@tailrec private def eliminationOrderHelper(candidates: PriorityMap[Variable[_], Double],
toPreserve: Traversable[Variable[_]],
graph: VEGraph,
currentScore: Double,
accum: List[Variable[_]]): (Double, List[Variable[_]]) = {
accum: List[Variable[_]], useBestScore: Boolean): (Double, List[Variable[_]]) = {
if (candidates.isEmpty) (currentScore, accum.reverse)
else {
val (best, bestScore) = candidates.extractMin()
// do not read the best variable after it has been removed, and do not add the preserved variables
val touched = graph.info(best).neighbors - best -- toPreserve
val nextGraph = graph.eliminate(best)
val (nextGraph, newCost) = graph.eliminate(best)
touched foreach (v => candidates += v -> graph.score(v))
eliminationOrderHelper(candidates, toPreserve, nextGraph, bestScore max currentScore, best :: accum)
val nextScore = if (useBestScore) bestScore max currentScore else newCost + currentScore
eliminationOrderHelper(candidates, toPreserve, nextGraph, nextScore, best :: accum, useBestScore)
}
}

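The useBestScore flag switches eliminationOrderHelper between the original behavior, tracking the worst single-step score, and summing the per-step costs now reported by VEGraph.eliminate. A sketch of calling both modes, assuming factors and the preserved query variables are in scope:

// Original behavior: the score of the worst single elimination step
val (worstStep, order1) = VariableElimination.eliminationOrder(factors, preserved)

// New behavior: the total cost of the entire elimination
val (totalCost, order2) =
  VariableElimination.eliminationOrder(factors, preserved, useBestScore = false)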
