looper: supports Incr increment for each loop (default 1); stats have Di versions for storing per-di stats; log support for di data parallel -- ctx.Di set / used as appropriate.
rcoreilly committed May 31, 2023
1 parent 4ff8208 commit d1a0b74
Showing 13 changed files with 316 additions and 53 deletions.
27 changes: 15 additions & 12 deletions elog/context.go
@@ -29,6 +29,7 @@ type Context struct {
 	Logs *Logs `desc:"pointer to the Logs object with all log data"`
 	Stats *estats.Stats `desc:"pointer to stats"`
 	Net emer.Network `desc:"network"`
+	Di int `desc:"data parallel index for accessing data from network"`
 	Item *Item `desc:"current log Item"`
 	Scope etime.ScopeKey `desc:"current scope key"`
 	Mode etime.Modes `desc:"current scope eval mode (if standard)"`
@@ -217,33 +218,35 @@ func (ctx *Context) Layer(layNm string) emer.Layer {
 }
 
 // GetLayerTensor gets tensor of Unit values on a layer for given variable
-// di is a data parallel index di, for networks capable of processing input patterns in parallel.
-func (ctx *Context) GetLayerTensor(layNm, unitVar string, di int) *etensor.Float32 {
+// from current ctx.Di data parallel index.
+func (ctx *Context) GetLayerTensor(layNm, unitVar string) *etensor.Float32 {
 	ly := ctx.Layer(layNm)
 	tsr := ctx.Stats.F32Tensor(layNm)
-	ly.UnitValsTensor(tsr, unitVar, di)
+	ly.UnitValsTensor(tsr, unitVar, ctx.Di)
 	return tsr
 }
 
 // GetLayerRepTensor gets tensor of representative Unit values on a layer for given variable
-// di is a data parallel index di, for networks capable of processing input patterns in parallel.
-func (ctx *Context) GetLayerRepTensor(layNm, unitVar string, di int) *etensor.Float32 {
+// from current ctx.Di data parallel index.
+func (ctx *Context) GetLayerRepTensor(layNm, unitVar string) *etensor.Float32 {
 	ly := ctx.Layer(layNm)
 	tsr := ctx.Stats.F32Tensor(layNm)
-	ly.UnitValsRepTensor(tsr, unitVar, di)
+	ly.UnitValsRepTensor(tsr, unitVar, ctx.Di)
 	return tsr
 }
 
 // SetLayerTensor sets tensor of Unit values on a layer for given variable
-func (ctx *Context) SetLayerTensor(layNm, unitVar string, di int) *etensor.Float32 {
-	tsr := ctx.GetLayerTensor(layNm, unitVar, di)
+// to current ctx.Di data parallel index.
+func (ctx *Context) SetLayerTensor(layNm, unitVar string) *etensor.Float32 {
+	tsr := ctx.GetLayerTensor(layNm, unitVar)
 	ctx.SetTensor(tsr)
 	return tsr
 }
 
 // SetLayerRepTensor sets tensor of representative Unit values on a layer for given variable
-func (ctx *Context) SetLayerRepTensor(layNm, unitVar string, di int) *etensor.Float32 {
-	tsr := ctx.GetLayerRepTensor(layNm, unitVar, di)
+// to current ctx.Di data parallel index.
+func (ctx *Context) SetLayerRepTensor(layNm, unitVar string) *etensor.Float32 {
+	tsr := ctx.GetLayerRepTensor(layNm, unitVar)
 	ctx.SetTensor(tsr)
 	return tsr
 }
@@ -252,8 +255,8 @@ func (ctx *Context) SetLayerRepTensor(layNm, unitVar string, di int) *etensor.Fl
 // given layer activation pattern using given variable. Returns the row number,
 // correlation value, and value of a column named namecol for that row if non-empty.
 // Column must be etensor.Float32
-func (ctx *Context) ClosestPat(layNm, unitVar string, di int, pats *etable.Table, colnm, namecol string) (int, float32, string) {
-	tsr := ctx.SetLayerTensor(layNm, unitVar, di)
+func (ctx *Context) ClosestPat(layNm, unitVar string, pats *etable.Table, colnm, namecol string) (int, float32, string) {
+	tsr := ctx.SetLayerTensor(layNm, unitVar)
 	col := pats.ColByName(colnm)
 	// note: requires Increasing metric so using Inv
 	row, cor := metric.ClosestRow32(tsr, col.(*etensor.Float32), metric.InvCorrelation32)
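
Usage sketch for the new Context API: a log item's Write closure no longer threads di through; it reads at ctx.Di, which the logging entry points set (see LogRowScope below). The item, layer, and variable names here ("HiddenAct", "Hidden", "Act") are hypothetical:

```go
lg.AddItem(&elog.Item{
	Name:  "HiddenAct",
	Type:  etensor.FLOAT32,
	Range: minmax.F64{Max: 1},
	Write: elog.WriteMap{
		etime.Scope(etime.Train, etime.Trial): func(ctx *elog.Context) {
			ctx.SetLayerRepTensor("Hidden", "Act") // reads units at ctx.Di internally
		}}})
```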
19 changes: 16 additions & 3 deletions elog/logs.go
@@ -238,22 +238,35 @@ func (lg *Logs) Log(mode etime.Modes, time etime.Times) *etable.Table {
 // and saves data to file if open.
 func (lg *Logs) LogScope(sk etime.ScopeKey) *etable.Table {
 	lt := lg.Tables[sk]
-	return lg.LogRowScope(sk, lt.Table.Rows)
+	return lg.LogRowScope(sk, lt.Table.Rows, 0)
 }
 
 // LogRow performs logging for given mode, time, at given row.
 // Saves data to file if open.
 func (lg *Logs) LogRow(mode etime.Modes, time etime.Times, row int) *etable.Table {
-	return lg.LogRowScope(etime.Scope(mode, time), row)
+	return lg.LogRowScope(etime.Scope(mode, time), row, 0)
 }
 
+// LogRowDi performs logging for given mode, time, at given row,
+// using given data parallel index di, which is added to the row, and all
+// network access routines use this index for accessing network data.
+// Saves data to file if open.
+func (lg *Logs) LogRowDi(mode etime.Modes, time etime.Times, row int, di int) *etable.Table {
+	return lg.LogRowScope(etime.Scope(mode, time), row, di)
+}
+
 // LogRowScope performs logging for given etime.ScopeKey, at given row.
 // Saves data to file if open.
-func (lg *Logs) LogRowScope(sk etime.ScopeKey, row int) *etable.Table {
+// di is a data parallel index, for networks capable of processing input patterns in parallel.
+// effective row is row + di
+func (lg *Logs) LogRowScope(sk etime.ScopeKey, row int, di int) *etable.Table {
 	lt := lg.Tables[sk]
 	dt := lt.Table
+	lg.Context.Di = di
 	if row < 0 {
 		row = dt.Rows
+	} else {
+		row += di
 	}
 	if dt.Rows <= row {
 		dt.SetNumRows(row + 1)
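
A sketch of the intended call pattern: log one row per data-parallel input, with the base row supplied by a trial counter that advances by ndata per step (see the looper.Ctr.Inc change below); trial here is a hypothetical counter value:

```go
ndata := net.NParallelData()
for di := 0; di < ndata; di++ {
	// effective row = trial + di; LogRowDi also sets ctx.Di so all
	// network access routines read this data-parallel slice
	lg.LogRowDi(etime.Train, etime.Trial, trial, di)
}
```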
6 changes: 3 additions & 3 deletions elog/stditems.go
@@ -333,7 +333,7 @@ func (lg *Logs) RunStats(stats ...string) {
 // If another item already exists for a different mode / time, this is added
 // to it so there aren't any duplicate items.
 // di is a data parallel index di, for networks capable of processing input patterns in parallel.
-func (lg *Logs) AddLayerTensorItems(net emer.Network, varNm string, di int, mode etime.Modes, etm etime.Times, layClasses ...string) {
+func (lg *Logs) AddLayerTensorItems(net emer.Network, varNm string, mode etime.Modes, etm etime.Times, layClasses ...string) {
 	layers := net.LayersByClass(layClasses...)
 	for _, lnm := range layers {
 		clnm := lnm
@@ -342,7 +342,7 @@ func (lg *Logs) AddLayerTensorItems(net emer.Network, varNm string, di int, mode
 		itm, has := lg.ItemByName(itmNm)
 		if has {
 			itm.Write[etime.Scope(mode, etm)] = func(ctx *Context) {
-				ctx.SetLayerRepTensor(clnm, varNm, di)
+				ctx.SetLayerRepTensor(clnm, varNm)
 			}
 		} else {
 			lg.AddItem(&Item{
@@ -353,7 +353,7 @@ func (lg *Logs) AddLayerTensorItems(net emer.Network, varNm string, di int, mode
 				Range: minmax.F64{Max: 1},
 				Write: WriteMap{
 					etime.Scope(mode, etm): func(ctx *Context) {
-						ctx.SetLayerRepTensor(clnm, varNm, di)
+						ctx.SetLayerRepTensor(clnm, varNm)
 					}}})
 		}
 	}
10 changes: 10 additions & 0 deletions emer/network.go
@@ -140,4 +140,14 @@ type Network interface {
 	// space-separated strings specified in Class for parameter styling, etc.
 	// If no classes are passed, all layer names in order are returned.
 	LayersByClass(classes ...string) []string
+
+	// MaxParallelData returns the maximum number of data inputs that can be
+	// processed in parallel by the network.
+	// The NetView supports display of up to this many data elements.
+	MaxParallelData() int
+
+	// NParallelData returns the current number of data inputs being
+	// processed in parallel by the network.
+	// Logging supports recording each of these where appropriate.
+	NParallelData() int
 }
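
A concrete network would satisfy these with simple accessors; a minimal sketch, assuming hypothetical MaxData and NData fields holding the configured and currently-active data-parallel widths:

```go
func (nt *Network) MaxParallelData() int { return nt.MaxData }
func (nt *Network) NParallelData() int   { return nt.NData }
```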
19 changes: 18 additions & 1 deletion env/envs.go
@@ -4,7 +4,11 @@
 
 package env
 
-import "github.com/emer/emergent/etime"
+import (
+	"fmt"
+
+	"github.com/emer/emergent/etime"
+)
 
 // Envs is a map of environments organized according
 // to the evaluation mode string (recommended key value)
@@ -30,3 +34,16 @@ func (es *Envs) Add(evs ...Env) {
 func (es *Envs) ByMode(mode etime.Modes) Env {
 	return (*es)[mode.String()]
 }
+
+// ModeDi returns the string of the given mode appended with
+// _di data index with leading zero.
+func ModeDi(mode etime.Modes, di int) string {
+	return fmt.Sprintf("%s_%02d", mode.String(), di)
+}
+
+// ByModeDi returns env by etime.Modes evaluation mode and
+// data parallel index as the map key, using ModeDi function.
+// returns nil if not found
+func (es *Envs) ByModeDi(mode etime.Modes, di int) Env {
+	return (*es)[ModeDi(mode, di)]
+}
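
A usage sketch: register one environment per data-parallel index, named via ModeDi so ByModeDi can retrieve it (TrainEnv and its Nm name field are hypothetical, and Add is assumed to key the map by each env's name):

```go
for di := 0; di < ndata; di++ {
	ev := &TrainEnv{}
	ev.Nm = env.ModeDi(etime.Train, di) // "Train_00", "Train_01", ...
	envs.Add(ev)
}
ev := envs.ByModeDi(etime.Train, 1) // looks up "Train_01"
```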
12 changes: 6 additions & 6 deletions estats/funcs.go
@@ -19,7 +19,7 @@ import (
 // di is a data parallel index di, for networks capable of processing input patterns in parallel.
 func (st *Stats) SetLayerTensor(net emer.Network, layNm, unitVar string, di int) *etensor.Float32 {
 	ly := net.LayerByName(layNm)
-	tsr := st.F32Tensor(layNm)
+	tsr := st.F32TensorDi(layNm, di)
 	ly.UnitValsTensor(tsr, unitVar, di)
 	return tsr
 }
@@ -29,7 +29,7 @@ func (st *Stats) SetLayerTensor(net emer.Network, layNm, unitVar string, di int)
 // di is a data parallel index di, for networks capable of processing input patterns in parallel.
 func (st *Stats) SetLayerRepTensor(net emer.Network, layNm, unitVar string, di int) *etensor.Float32 {
 	ly := net.LayerByName(layNm)
-	tsr := st.F32Tensor(layNm)
+	tsr := st.F32TensorDi(layNm, di)
 	ly.UnitValsRepTensor(tsr, unitVar, di)
 	return tsr
 }
@@ -38,9 +38,9 @@ func (st *Stats) LayerVarsCorrel(net emer.Network, layNm, unitVarA, unitVarB str
 // di is a data parallel index di, for networks capable of processing input patterns in parallel.
 func (st *Stats) LayerVarsCorrel(net emer.Network, layNm, unitVarA, unitVarB string, di int) float32 {
 	ly := net.LayerByName(layNm)
-	tsrA := st.F32Tensor(layNm) // standard re-used storage tensor
+	tsrA := st.F32TensorDi(layNm, di) // standard re-used storage tensor
 	ly.UnitValsTensor(tsrA, unitVarA, di)
-	tsrB := st.F32Tensor(layNm + "_alt") // alternative storage tensor
+	tsrB := st.F32TensorDi(layNm+"_alt", di) // alternative storage tensor
 	ly.UnitValsTensor(tsrB, unitVarB, di)
 	return metric.Correlation32(tsrA.Values, tsrB.Values)
 }
@@ -50,9 +50,9 @@ func (st *Stats) LayerVarsCorrelRep(net emer.Network, layNm, unitVarA, unitVarB str
 // di is a data parallel index di, for networks capable of processing input patterns in parallel.
 func (st *Stats) LayerVarsCorrelRep(net emer.Network, layNm, unitVarA, unitVarB string, di int) float32 {
 	ly := net.LayerByName(layNm)
-	tsrA := st.F32Tensor(layNm) // standard re-used storage tensor
+	tsrA := st.F32TensorDi(layNm, di) // standard re-used storage tensor
 	ly.UnitValsRepTensor(tsrA, unitVarA, di)
-	tsrB := st.F32Tensor(layNm + "_alt") // alternative storage tensor
+	tsrB := st.F32TensorDi(layNm+"_alt", di) // alternative storage tensor
 	ly.UnitValsRepTensor(tsrB, unitVarB, di)
 	return metric.Correlation32(tsrA.Values, tsrB.Values)
 }
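
A sketch of computing a per-di stat with these functions, inside a loop over the active data-parallel inputs (layer "Hidden", variable "Act", and stat "HiddenMax" are hypothetical names):

```go
for di := 0; di < net.NParallelData(); di++ {
	// stored under "Hidden_00", "Hidden_01", ... via F32TensorDi
	tsr := st.SetLayerTensor(net, "Hidden", "Act", di)
	mx := float32(0)
	for _, v := range tsr.Values {
		if v > mx {
			mx = v
		}
	}
	st.SetFloat32Di("HiddenMax", di, mx)
}
```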
133 changes: 133 additions & 0 deletions estats/stats.go
@@ -109,6 +109,9 @@ func (st *Stats) PrintVals(stats, fmts []string, delim string) string {
 	return str
 }
 
+//////////////////////////////////////
+//  Set, Get vals
+
 // SetFloat sets Floats stat value
 func (st *Stats) SetFloat(name string, value float64) {
 	st.Floats[name] = value
@@ -213,6 +216,136 @@ func (st *Stats) SetIntTensor(name string, tsr *etensor.Int) {
 	st.IntTensors[name] = tsr
 }
 
+//////////////////////////////////////////////
+//  Set, Get vals, data index versions
+
+// DiName returns a string formatted with the given name
+// appended with _di data index.
+func DiName(name string, di int) string {
+	return fmt.Sprintf("%s_%02d", name, di)
+}
+
+// SetFloatDi sets Floats stat value.
+// Data parallel index version appends _di to name.
+func (st *Stats) SetFloatDi(name string, di int, value float64) {
+	st.Floats[DiName(name, di)] = value
+}
+
+// SetFloat32Di sets Floats stat value using a float32 value.
+// Data parallel index version appends _di to name.
+func (st *Stats) SetFloat32Di(name string, di int, value float32) {
+	st.Floats[DiName(name, di)] = float64(value)
+}
+
+// SetStringDi sets Strings stat value.
+// Data parallel index version appends _di to name.
+func (st *Stats) SetStringDi(name string, di int, value string) {
+	st.Strings[DiName(name, di)] = value
+}
+
+// SetIntDi sets Ints stat value.
+// Data parallel index version appends _di to name.
+func (st *Stats) SetIntDi(name string, di int, value int) {
+	st.Ints[DiName(name, di)] = value
+}
+
+// FloatDi returns Floats stat value -- prints error message and returns 0 if not found.
+// Data parallel index version appends _di to name.
+func (st *Stats) FloatDi(name string, di int) float64 {
+	val, has := st.Floats[DiName(name, di)]
+	if has {
+		return val
+	}
+	fmt.Printf("Value named: %s not found in Stats\n", name)
+	return 0
+}
+
+// Float32Di returns Floats stat value converted to float32 --
+// prints error message and returns 0 if not found.
+// Data parallel index version appends _di to name.
+func (st *Stats) Float32Di(name string, di int) float32 {
+	return float32(st.FloatDi(name, di))
+}
+
+// StringDi returns Strings stat value -- prints error message and returns "" if not found.
+// Data parallel index version appends _di to name.
+func (st *Stats) StringDi(name string, di int) string {
+	val, has := st.Strings[DiName(name, di)]
+	if has {
+		return val
+	}
+	fmt.Printf("Value named: %s not found in Stats\n", name)
+	return ""
+}
+
+// IntDi returns Ints stat value -- prints error message and returns 0 if not found.
+// Data parallel index version appends _di to name.
+func (st *Stats) IntDi(name string, di int) int {
+	val, has := st.Ints[DiName(name, di)]
+	if has {
+		return val
+	}
+	fmt.Printf("Value named: %s not found in Stats\n", name)
+	return 0
+}
+
+// F32TensorDi returns a float32 tensor of given name, creating if not yet made.
+// Data parallel index version appends _di to name.
+func (st *Stats) F32TensorDi(name string, di int) *etensor.Float32 {
+	tsr, has := st.F32Tensors[DiName(name, di)]
+	if !has {
+		tsr = &etensor.Float32{}
+		st.F32Tensors[DiName(name, di)] = tsr
+	}
+	return tsr
+}
+
+// F64TensorDi returns a float64 tensor of given name, creating if not yet made.
+// Data parallel index version appends _di to name.
+func (st *Stats) F64TensorDi(name string, di int) *etensor.Float64 {
+	tsr, has := st.F64Tensors[DiName(name, di)]
+	if !has {
+		tsr = &etensor.Float64{}
+		st.F64Tensors[DiName(name, di)] = tsr
+	}
+	return tsr
+}
+
+// IntTensorDi returns an int tensor of given name, creating if not yet made.
+// Data parallel index version appends _di to name.
+func (st *Stats) IntTensorDi(name string, di int) *etensor.Int {
+	tsr, has := st.IntTensors[DiName(name, di)]
+	if !has {
+		tsr = &etensor.Int{}
+		st.IntTensors[DiName(name, di)] = tsr
+	}
+	return tsr
+}
+
+// SetF32TensorDi sets a float32 tensor of given name.
+// Just does: st.F32Tensors[DiName(name, di)] = tsr
+// Data parallel index version appends _di to name.
+func (st *Stats) SetF32TensorDi(name string, di int, tsr *etensor.Float32) {
+	st.F32Tensors[DiName(name, di)] = tsr
+}
+
+// SetF64TensorDi sets a float64 tensor of given name.
+// Just does: st.F64Tensors[DiName(name, di)] = tsr
+// Data parallel index version appends _di to name.
+func (st *Stats) SetF64TensorDi(name string, di int, tsr *etensor.Float64) {
+	st.F64Tensors[DiName(name, di)] = tsr
+}
+
+// SetIntTensorDi sets an int tensor of given name.
+// Just does: st.IntTensors[DiName(name, di)] = tsr
+// Data parallel index version appends _di to name.
+func (st *Stats) SetIntTensorDi(name string, di int, tsr *etensor.Int) {
+	st.IntTensors[DiName(name, di)] = tsr
+}
+
+/////////////////////////////////////////
+//  Misc items
+
 // SimMat returns a SimMat similarity matrix of given name, creating if not yet made
 func (st *Stats) SimMat(name string) *simat.SimMat {
 	sm, has := st.SimMats[name]
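
The _di naming convention keeps per-di values in the same maps as plain-named stats; a minimal sketch using the functions above:

```go
st.SetFloatDi("TrlErr", 1, 0.5)  // stores st.Floats["TrlErr_01"]
v := st.FloatDi("TrlErr", 1)     // reads "TrlErr_01" -> 0.5
nm := estats.DiName("TrlErr", 1) // "TrlErr_01"
```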
3 changes: 2 additions & 1 deletion looper/counter.go
@@ -8,11 +8,12 @@ package looper
 type Ctr struct {
 	Cur int `desc:"current counter value"`
 	Max int `desc:"maximum counter value -- only used if > 0"`
+	Inc int `desc:"increment per iteration"`
 }
 
 // Incr increments the counter by 1. Does not interact with Max.
 func (ct *Ctr) Incr() {
-	ct.Cur++
+	ct.Cur += ct.Inc
 }
 
 // SkipToMax sets the counter to its Max value -- for skipping over rest of loop
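
Note that a zero-valued Ctr now has Inc == 0, making Incr a no-op unless Inc is set; per the commit message the looper defaults it to 1. A sketch of a trial counter advancing by the data-parallel width:

```go
ctr := looper.Ctr{Max: 100, Inc: ndata} // ndata inputs consumed per iteration
ctr.Incr()                              // Cur: 0 -> ndata
```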
2 changes: 1 addition & 1 deletion looper/manager.go
@@ -295,7 +295,7 @@ func (man *Manager) runLevel(currentLevel int) bool {
 	}
 
 	// Increment
-	ctr.Cur = ctr.Cur + 1
+	ctr.Incr()
 	// Reset the counter at the next level. Do this here so that the counter number is visible during loop.OnEnd.
 	if currentLevel+1 < len(st.Order) {
 		st.Loops[st.Order[currentLevel+1]].Counter.Cur = 0
