Oslo method errors #187

Draft: wants to merge 38 commits into base: master
Commits (38)
da6739e  Added a Fermi-Gas like probability distribution (vetlewi, Aug 3, 2021)
5c1aa9b  Added ErrorFinder class (vetlewi, Aug 3, 2021)
97ca023  Changed filename for EnsembleNormalizer (vetlewi, Aug 3, 2021)
6efe711  Fixed bugs and make sure the dist test works (vetlewi, Aug 3, 2021)
cf266fa  Debug etc (vetlewi, Aug 3, 2021)
2d4c162  An updated, now fully working version of the error_finder class (vetlewi, Aug 11, 2021)
9682048  Added pyMC3 as dependency (vetlewi, Aug 11, 2021)
8ae8a10  Updated release notes (vetlewi, Aug 11, 2021)
88c7ac7  Initial version of the notebook (vetlewi, Aug 11, 2021)
65cae01  Added a call wrapper (vetlewi, Aug 12, 2021)
0e10713  Added an indices method similar to that of the Matrix class (vetlewi, Aug 12, 2021)
06796a6  Added uncertainty estimation after extraction (vetlewi, Aug 12, 2021)
14d79b0  Added note about the indices method (vetlewi, Aug 12, 2021)
13d896f  Overwrite file output to ensure errors are stored (vetlewi, Aug 12, 2021)
54ace62  Bugfix (vetlewi, Aug 13, 2021)
4331b52  Updates of the notebook (vetlewi, Sep 27, 2021)
92b3097  Fixing some style issues (vetlewi, Sep 27, 2021)
f50b513  Update release_note.md (vetlewi, Sep 30, 2021)
e5de25d  Moved `all_equal` function to end of file (vetlewi, Sep 30, 2021)
c958eb6  Update ompy/error_finder.py (vetlewi, Sep 30, 2021)
ba988f2  Minor style fixes (vetlewi, Sep 30, 2021)
8d6c577  Better error message (vetlewi, Sep 30, 2021)
785c674  Better comments (vetlewi, Sep 30, 2021)
4fb4766  Added note about the new notebook (vetlewi, Sep 30, 2021)
df97416  Prior params can be modified (vetlewi, Oct 13, 2021)
00e62f1  Added todo (vetlewi, Oct 13, 2021)
48cb4bb  Added option to suppress warnings (vetlewi, Oct 22, 2021)
dee03d7  Changed formula (vetlewi, Oct 27, 2021)
3a72c2f  Added plot example to docstring (vetlewi, Oct 27, 2021)
760a979  Removed the linear version (vetlewi, Oct 28, 2021)
621c95c  Refactoring of error_finder (vetlewi, Oct 29, 2021)
8ffc7e7  Ensure nan is removed (vetlewi, Jan 5, 2022)
19c79b7  Similar to previous commit, but with some additional fixes (vetlewi, Jan 5, 2022)
ab00bd8  Added kwargs (vetlewi, Jan 5, 2022)
419e1a0  Added option to get the trace from the inference (vetlewi, Jan 5, 2022)
0d87d8f  Merge branch 'nan-remove-normalizers' into oslo-method-errors (vetlewi, Jan 5, 2022)
0e61dff  Some changes... Only here though... (vetlewi, Jul 25, 2022)
394e29d  Updated to work with pymc version 5 (vetlewi, Jan 5, 2023)
1,131 changes: 1,131 additions & 0 deletions notebooks/Untitled.ipynb

Large diffs are not rendered by default.

194 changes: 194 additions & 0 deletions notebooks/model_op.py
@@ -0,0 +1,194 @@
import numpy as np
import ompy as om
import theano.tensor as tt
import pymc3 as pm

def diagonal_resolution(matrix, resolution_Ex):
"""Detector resolution at the Ex=Eg diagonal

Uses gaussian error propagations which assumes independence of
resolutions along Ex and Eg axis.

Args:
matrix (Matrix): Matrix for which the sesoluton shall be calculated

Returns:
resolution at Ex = Eg.
"""
def resolution_Eg(matrix):
"""Resolution along Eg axis for each Ex. Defaults in this class are for OSCAR.

Args:
matrix (Matrix): Matrix for which the sesoluton shall be calculated

Returns:
resolution
"""
        def fFWHM(E, p):
            # FWHM(E) = sqrt(p0 + p1*E + p2*E**2)
            return np.sqrt(p[0] + p[1] * E + p[2] * E**2)
        # Default FWHM parameters for OSCAR
        fwhm_pars = np.array([73.2087, 0.50824, 9.62481e-05])
        return fFWHM(matrix.Ex, fwhm_pars)

    dEx = matrix.Ex[1] - matrix.Ex[0]
    dEg = matrix.Eg[1] - matrix.Eg[0]
    assert dEx == dEg, "Ex and Eg bin widths must be equal"

dE_resolution = np.sqrt(resolution_Ex**2
+ resolution_Eg(matrix)**2)
return dE_resolution




# Op that builds the first-generation (FG) matrix from the NLD and T parameters
# and returns the corresponding log-likelihood (-chi2/2)
class calculate_FG(tt.Op):

itypes = [tt.dvector] # expects a vector of parameter values when called
otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood)

def __init__(self, matrix, std, E_nld):

self.matrix = matrix.copy()
self.std = std.copy()
self.resolution = diagonal_resolution(matrix, 150.)
self.E_nld = E_nld.copy(order='C')

self.matrix.values, self.std.values = om.extractor.normalize(self.matrix, self.std)

self.std.values = std.values.copy(order='C')
self.matrix.Ex = self.matrix.Ex.copy(order='C')
self.matrix.Eg = self.matrix.Eg.copy(order='C')
self.matrix.values = self.matrix.values.copy(order='C')


def perform(self, node, inputs, outputs):

(x,) = inputs
T = x[:self.matrix.Eg.size]
nld = x[self.matrix.Eg.size:]

fg_th = om.decomposition.nld_T_product(nld, T, self.resolution, self.E_nld,
self.matrix.Eg, self.matrix.Ex)

z = -0.5*np.array(om.decomposition.chisquare_diagonal(self.matrix.values, fg_th,
self.std.values, self.resolution,
self.matrix.Eg, self.matrix.Ex))
outputs[0][0] = z

class FG_loglike:
    """Callable returning the chi-square between the data and the theoretical
    first-generation matrix built from a parameter vector x = [T, nld].

    Note that __call__ returns the chi-square itself; a wrapper (e.g. LogLike2)
    still has to convert it into a log-likelihood.
    """

    def __init__(self, matrix, std, E_nld):

self.matrix = matrix.copy()
self.std = std.copy()
self.resolution = diagonal_resolution(matrix, 150.)
self.E_nld = E_nld.copy(order='C')

self.matrix.values, self.std.values = om.extractor.normalize(self.matrix, self.std)

self.std.values = std.values.copy(order='C')
self.matrix.Ex = self.matrix.Ex.copy(order='C')
self.matrix.Eg = self.matrix.Eg.copy(order='C')
self.matrix.values = self.matrix.values.copy(order='C')

def __call__(self, x):
T = x[:self.matrix.Eg.size]
nld = x[self.matrix.Eg.size:]

fg_th = om.decomposition.nld_T_product(nld, T, self.resolution, self.E_nld,
self.matrix.Eg, self.matrix.Ex)

return om.decomposition.chisquare_diagonal(self.matrix.values, fg_th,
self.std.values, self.resolution,
self.matrix.Eg, self.matrix.Ex)

class LogLike2(tt.Op):
"""
Specify what type of object will be passed and returned to the Op when it is
called. In our case we will be passing it a vector of values (the parameters
that define our model) and returning a single "scalar" value (the
log-likelihood)
"""

itypes = [tt.dvector] # expects a vector of parameter values when called
otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood)

def __init__(self, loglike):
"""
Initialise the Op with various things that our log-likelihood function
requires. Below are the things that are needed in this particular
example.

Parameters
----------
loglike:
The log-likelihood (or whatever) function we've defined
data:
The "observed" data that our log-likelihood function takes in
x:
The dependent variable (aka 'x') that our model requires
sigma:
The noise standard deviation that our function requires.
"""

# add inputs as class attributes
self.likelihood = loglike

def perform(self, node, inputs, outputs):
# the method that is used when calling the Op
(theta,) = inputs # this will contain my variables

# call the log-likelihood function
logl = self.likelihood(theta)

outputs[0][0] = np.array(logl) # output the log-likelihood

def loglike(theta, x, data, sigma):
    # Simple placeholder log-likelihood (ignores x, data and sigma)
    return -0.5*np.sum(theta)

# define a theano Op for our likelihood function
class LogLike(tt.Op):

"""
Specify what type of object will be passed and returned to the Op when it is
called. In our case we will be passing it a vector of values (the parameters
that define our model) and returning a single "scalar" value (the
log-likelihood)
"""

itypes = [tt.dvector] # expects a vector of parameter values when called
otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood)

def __init__(self, loglike, data, x, sigma):
"""
Initialise the Op with various things that our log-likelihood function
requires. Below are the things that are needed in this particular
example.

Parameters
----------
loglike:
The log-likelihood (or whatever) function we've defined
data:
The "observed" data that our log-likelihood function takes in
x:
The dependent variable (aka 'x') that our model requires
sigma:
The noise standard deviation that our function requires.
"""

# add inputs as class attributes
self.likelihood = loglike
self.data = data
self.x = x
self.sigma = sigma

def perform(self, node, inputs, outputs):
# the method that is used when calling the Op
(theta,) = inputs # this will contain my variables

# call the log-likelihood function
logl = self.likelihood(theta, self.x, self.data, self.sigma)

outputs[0][0] = np.array(logl) # output the log-likelihood
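
For context, here is a minimal sketch of how these Ops might be wired into a pymc3 model. It is not part of the pull request: `matrix`, `std` and `E_nld` are placeholders for the first-generation matrix, its standard deviation and the NLD energy grid from an OMpy analysis, and the HalfFlat priors and Metropolis step are illustrative choices only.

import pymc3 as pm
import theano.tensor as tt

from model_op import FG_loglike, LogLike2

# Assumed inputs (placeholders): matrix and std are om.Matrix objects holding
# the first-generation matrix and its standard deviation; E_nld is the NLD
# energy grid (np.ndarray).
logl = LogLike2(FG_loglike(matrix, std, E_nld))

with pm.Model():
    # Illustrative priors; the parameter vector is ordered [T, nld],
    # matching FG_loglike.__call__.
    T = pm.HalfFlat("T", shape=matrix.Eg.size)
    nld = pm.HalfFlat("nld", shape=E_nld.size)
    theta = tt.concatenate([T, nld])

    # FG_loglike returns a chi-square, so the log-likelihood is -chi2/2.
    pm.Potential("loglike", -0.5 * logl(theta))

    # The Op defines no gradient, so use a gradient-free sampler.
    trace = pm.sample(draws=2000, step=pm.Metropolis(), cores=1)

The -chi2/2 factor mirrors what calculate_FG computes internally; calculate_FG could be used in place of the LogLike2 wrapper, in which case the factor is already included.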