import numpy
import scipy.linalg  # `import scipy` alone does not reliably expose scipy.linalg


def Norm1(x, normOrder=None):
    """Norm of a 1-D sequence; `normOrder` maps to scipy's `ord` (None = 2-norm)."""
    return scipy.linalg.norm(numpy.asarray(x), ord=normOrder)


def Norm2(a, b, normOrder=None):
    # TODO: a true dot product; for now this is the element-wise product a * b
    x = numpy.asarray(a) * numpy.asarray(b)
    # see also: scipy.linalg.LinAlgError
    return scipy.linalg.norm(x, ord=normOrder)

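# Quick sanity check of the two norms (illustrative; the expected values
# assume the fixed implementations above):
print(Norm1([3, 4]))          # 5.0 : the 2-norm of [3, 4]
print(Norm2([3, 4], [1, 1]))  # 5.0 : the element-wise product leaves [3, 4] as-is

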
def modifyNonList(x):
    """Solves the first issue (scalar vs. list): compares the type against
    `list`, returning len(x) for a list and 0 for anything else (e.g. an `int`).

    Note: only works for 1-D data.

    See also:

    1. related function: morph(x, _lenOther)
    2. composite function: Norm(a, b, normOrder=None)
    """
    _lenX = 0
    if isinstance(x, list):
        _lenX = len(x)
    # else: no change; non-lists (scalars) keep _lenX = 0
    return _lenX

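# Behaviour check (illustrative):
print(modifyNonList([1, 2, 3]))  # 3 : a list reports its length
print(modifyNonList(5))          # 0 : a scalar is treated as length 0

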
def morph(x, _lenOther):
    """Changes the smaller operand to suit the length of the other."""
    return numpy.reshape(x, (1, _lenOther))

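# Caveat: numpy.reshape cannot change the number of elements, so `morph` only
# succeeds when x already holds _lenOther entries. A hedged alternative, not
# part of the original design (morph_pad is hypothetical), is zero-padding:
def morph_pad(x, _lenOther):
    """Sketch: right-pad a 1-D sequence with zeros up to length _lenOther."""
    x = numpy.asarray(x, dtype=float).ravel()
    return numpy.pad(x, (0, max(0, _lenOther - len(x))))
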
# another way of forming a Norm


def Norm(a, b, normOrder=None):  # best
    """For 1-D data."""
    # print(a); print(b)  # debug output, silenced

    # If the lengths are not equal, find which operand is the longer one.
    # TODO: more type-oriented programming.

    # Type-check a and b, then take their lengths (0 for scalars):
    _lenA = modifyNonList(a)  # instead of len(a), which fails for scalars
    _lenB = modifyNonList(b)

    masterLength = -1
    # By default _lenA == _lenB; if not, run the following sub-routine:

    # 0. Modify b to suit a's length:
    if _lenA > _lenB:
        masterLength = _lenA
        # change (morph) the smaller part
        # caveat: the reshape succeeds only if b already holds _lenA elements
        b = morph(b, _lenA)

    # ... or modify a to suit b's length:
    elif _lenB > _lenA:
        masterLength = _lenB
        # change (morph) the smaller part
        a = morph(a, _lenB)

    # 1. Element-wise product of the 2 vectors (TODO: a true dot product):
    x = numpy.asarray(a) * numpy.asarray(b)

    # 2. Calculate the norm of x, for the given `normOrder`.
    # Note (scipy.linalg.norm): if both `axis` and `ord` are None,
    # the 2-norm of x.ravel() is returned.
    return scipy.linalg.norm(x, ord=normOrder)

    # Superseded alternative, kept for reference (unreachable):
    # if len(a) == len(b):      # dimensions must match
    #     x = numpy.dot(a, b)   # scalar dot product (was: UnboundLocalError on `np`)
    #     return Norm1(x, None)


def Green(a, b):
    """Note: this is only a placeholder for the actual Green's function, not the function itself."""
    return Norm(a, b)


# abs, max, min are built-ins in the current language; no import needed
def distanceMeasure2(a, b):
    """Finds the measure, based on the absolute value of the variational distance."""
    return abs(max(a, b) - min(a, b))


def distanceMeasure1(a, b):
    """Finds the distance, based on squaring the variational distance."""
    return (a - b) ** 2

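# Quick check of both distance measures (illustrative values):
print(distanceMeasure2(1.0, 4.0))  # 3.0 == |1 - 4|
print(distanceMeasure1(1.0, 4.0))  # 9.0 == (1 - 4)**2

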
def D(Fun):
    """TODO: differentiate the function iteratively (i.e. as many times as required).

    Returns: the function itself (an identity placeholder).

    Idea for the eventual implementation: for a Green's function,
    return Dirac's delta.
    """
    return Fun

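# The TODO above asks for real differentiation. A minimal central-difference
# sketch, under the assumption of a scalar-valued Fun (D_numeric is
# hypothetical and not used by the demo below):
def D_numeric(Fun, h=1e-6):
    """Approximate Fun's first derivative by a central difference."""
    return lambda x: (Fun(x + h) - Fun(x - h)) / (2.0 * h)
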
# Global dummy placeholder variable
# TODO: add an optimization algorithm that searches for, and returns, the
# `optimal` solutions it finds, as a vector `t`
t_j_center = 0


def regularizationTerm(Lambda):
    """WARNING: D is a differential operator,
    and F_star is a function (or matrix).
    The differential operator is supposed to be able to differentiate F_star
    (whatever it might be)
    [most likely: a polynomial].
    """
    return lambda D, F_star: Lambda * Green(D(F_star), t_j_center)


Lambda = 1
F_star = 1  # TODO: polynomial (to build)
DifferentialOperator = D  # an int here raises "not callable"; the fix is a function delegate (i.e. a name)


def regularizationTermDemo(DifferentialOperator, Lambda):

    # call regularizationTerm(Lambda)
    anonfunction = regularizationTerm(Lambda)

    print("anonfunction (Signature): ", anonfunction)  # a valid lambda function

    print("D(F*) = ", DifferentialOperator(F_star))  # convolution operator (placeholder)

    # Instantiate (realize) the new anonfunction using:
    ## 1. DifferentialOperator D: a function delegate (i.e. a name)
    ## 2. F_star (F*): the value the (differential) function needs to be instantiated
    result = anonfunction(DifferentialOperator, F_star)  # expected: 0.0 (the correct answer)

    print("anonfunction_applied(D, F*) = ", result)
    return result

# Regularization: DEMO
## Assign an anonymous function and run the demo once via the helper above
## (this replaces the verbatim duplicates of its body that used to live here):
regularizationTermDemo(DifferentialOperator, Lambda)

### Evaluate the lambda's building block directly (desired function implementation):
a = DifferentialOperator(F_star)  # convolution operator (placeholder: D is the identity)
print("Differential Function type = ", type(a))

## Erroneous earlier attempt: "scalar arrays can be converted to a scalar index"
# print("anonfunction_applied(D, F*) = ", anonfunction(DifferentialOperator, F_star))  # 0.0 # correct answer

# Overview of the regularization term:
## A survival function: represents a shock wave: high at first, then it slowly
## dies out in <some particular manner> (it could be equal to the half-life of
## some material, or a more sophisticated function as well).


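# The survival function sketched in the comment above, as a hedged example
# (exponential decay parameterized by `half_life`; purely illustrative and
# not used by the demo below):
def survival(time, half_life=1.0):
    """S(t) = 2**(-t / half_life): 1 at t = 0, then slowly dying out."""
    return 2.0 ** (-time / half_life)

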
# F* (for 1-D inputs (a, b))

def F_star(x, N, m1, w, t, d, G=Green, step_size=1):
    """
    Arguments:

    #1. Input:
        x: data (here: the dot product of (a, b))

        N: dim1: dimension of a (the 1st input): `int`
        m1: dim2: dimension of b (the 2nd input): `int`
        w: weights
        t: centers (from optimization)
        d: desired-output vector, fed to the distance function
        G: Green's function (the Norm)

    #2. Processing:
        calculates 2 sums:

        1. the inner loop: `partialSum`
        2. the outer loop: the global sum, which adds up the distance measure
           (between `d[i]` and the `partialSum`)

    #3. Output:
        returns `globalSum`
    """
    # In the best case (the vanilla assumption), N == m1.

    a1 = 1
    b1 = N

    a2 = 1
    b2 = m1

    globalSum = 0
    # Loop 1 depends on loop 2: it waits for it to finish, then the value
    # d[i] - partialSum is calculated.
    for i in range(a1, len(d), step_size):  # intended: range(a1, b1, step_size)
        # reset per i, so partialSum is exactly the inner sum of the formula below
        partialSum = 0

        # Loop 2: calculates the partial sum (Green's function G, weighted with w[i]):
        for j in range(a2, len(t), step_size):  # intended: range(a2, b2, step_size)

            # Issue to resolve: get the gist of the data, i.e. the container it
            # is contained in, not just a scalar (and not just anomalous data
            # like the min or the max). Indexing a scalar here raised
            # IndexError: "invalid index to scalar variable" (Python means:
            # unexpected scalar; it was expecting a list).
            # Debug prints, silenced (they are noisy inside the loop):
            # print("w", w); print("w[i]", w[i]); print("x", x)
            # print("x[i]", x[i]); print("t[j]", t[j])
            partialSum += w[i] * G(Norm(x[i], t[j]), b)  # note: `b` is the module-level vector

        # d[i] - Sum[j = 1..m1] w[i] * G(Norm(x[i], t[j]))   # ~ ||x[i] - t[j]||
        # calculate the distance measure (d[i] - partialSum)**2,
        # then add it to the globalSum:
        globalSum += distanceMeasure1(d[i], partialSum)

    return globalSum

def getDifference(a, b):
    """TODO: a higher order of optimization,
    as we are unsure whether we are at a low or a height.

    A mini-max function would optimize the vectors a, b for us.  # TODO

    How much difference is required?
    Say, how much difference is different?

    Should we really throw everything out, to get what we want,
    or can we keep it?
    """

    # return max(a, b) - min(a, b)  # a - b
    # return a - b

    # Common-wealth mini-max algorithm

    # 1. Get the min vector, then maximize it.
    # Crucial note: this assumes the function is continuous, so the values are
    # reachable. If they are not reachable, we have to apply some `fix`, i.e.:
    ## 1. Instead of `max`: the supremum `sup` [a generalization of the maximum:
    ##    a value the function approaches (from the left- or right-hand side)].
    ## 2. Instead of `min`: the infimum `inf` [a generalization of the minimum,
    ##    approached (from the left- or right-hand side)].
    ## Note: the above requires some analysis theory (real or complex).

    # Attention: the next lines are the root of all evil: they throw the baby
    # out with the bathwater (so no more lessons can be learned): we are
    # dispensing with valuable information about our data, which we may need later.

    max_a = max(a)  # just the max of the first list
    min_b = min(b)  # just the min of the second list

    return max_a - min_b
    # return max(max_a, min_b) - min(max_a, min_b)  # a - b  # doesn't error out;
    # it (greedily) finds the values we asked it for

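# Illustrative check (the values match the demo vectors used below):
print(getDifference([1, 2, 3], [0.5, 0.65, 0.51]))  # 2.5 == 3 - 0.5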

# Demo: say we have the following two 1-D vectors:

a = [1, 2, 3]
b = [1, 1, 1]

# Level 1: Norm demo
print("Norm value:")
print(Norm(a, b))  # element-wise product, then its norm

x = Norm(a, b)

# Level 2: Green's function demo:

print("Green's value:")
Green(a, b)  # TODO: calculate the real Green's function; for demo purposes only [calls Norm(a, b)]

# Level 3: demo F*:


# Full equation: f*(x) + regularization(D(F_star(x)))
""" F_star(x)  # TODO: """

w = [1, 1, 1]
m1 = 10
N = m1
# centers (from optimization)
t = [0.5, 0.65, 0.51]

# Required: the difference between the desired vector `b` and the vector
# calculated by optimization, `t` - here it is just a placeholder.
d = getDifference(b, t)  # final (desired) output b minus the t calculated by the optimization function
# Now we can calculate F*:

# Set up the data array

# 1. Check the shapes
a = numpy.array(a)
b = numpy.array(b)
# m = b @ a  # matrix product, if ever needed

print("a dims = ", a.shape)
print("b dims = ", b.shape)

# data = numpy.concatenate(a, b)  # couldn't concatenate: the arrays must be passed as a tuple
## print("data = ", data)

## I = numpy.ones(a.shape, dtype='int')
I = numpy.eye(3)

# The correct way is to input the two arrays as a tuple:
print("concatenate = ", numpy.concatenate((a, a)))

data1 = numpy.append(a, a.T)
print("data1 = ", data1)
data = numpy.concatenate((a, a))

# Failed variants, kept for reference:
# data = numpy.append((a, a.T))
# data = numpy.concatenate(a, a.T)
# data = numpy.concatenate(a, b.T)

# For ease of use, set d to the desired output, say `b`
# (i.e. the desired output is the second input vector):
d = b

"""Uncomment me:
## But what if:
d = t  # originally reported: F* = 1.3652000000000002
"""
# Interpretation:
"""When d = t,
F* (the cumulative variational value of the errors) is close to 0 (small).
"""

final_result = F_star(data, N, m1, w, t, d)

print("final result", final_result)  # originally reported: 4.0 (before the fixes above)