| author | mo khan <mo@mokhan.ca> | 2025-07-22 17:35:49 -0600 |
|---|---|---|
| committer | mo khan <mo@mokhan.ca> | 2025-07-22 17:35:49 -0600 |
| commit | 20ef0d92694465ac86b550df139e8366a0a2b4fa (patch) | |
| tree | 3f14589e1ce6eb9306a3af31c3a1f9e1af5ed637 /vendor/github.com/dalzilio/rudd/hkernel.go | |
| parent | 44e0d272c040cdc53a98b9f1dc58ae7da67752e6 (diff) | |
feat: connect to spicedb
Diffstat (limited to 'vendor/github.com/dalzilio/rudd/hkernel.go')
| -rw-r--r-- | vendor/github.com/dalzilio/rudd/hkernel.go | 216 |
1 file changed, 216 insertions, 0 deletions
```diff
diff --git a/vendor/github.com/dalzilio/rudd/hkernel.go b/vendor/github.com/dalzilio/rudd/hkernel.go
new file mode 100644
index 0000000..df30547
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/hkernel.go
@@ -0,0 +1,216 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+// +build !buddy
+
+package rudd
+
+import (
+	"log"
+	"math"
+	"runtime"
+	"sync/atomic"
+)
+
+func (b *tables) retnode(n int) Node {
+	if n < 0 || n > len(b.nodes) {
+		if _DEBUG {
+			log.Panicf("b.retnode(%d) not valid\n", n)
+		}
+		return nil
+	}
+	if n == 0 {
+		return bddzero
+	}
+	if n == 1 {
+		return bddone
+	}
+	x := n
+	if b.nodes[n].refcou < _MAXREFCOUNT {
+		b.nodes[n].refcou++
+		runtime.SetFinalizer(&x, b.nodefinalizer)
+		if _DEBUG {
+			atomic.AddUint64(&(b.setfinalizers), 1)
+			if _LOGLEVEL > 2 {
+				log.Printf("inc refcou %d\n", n)
+			}
+		}
+	}
+	return &x
+}
+
+func (b *tables) makenode(level int32, low int, high int, refstack []int) (int, error) {
+	if _DEBUG {
+		b.uniqueAccess++
+	}
+	// check whether children are equal, in which case we can skip the node
+	if low == high {
+		return low, nil
+	}
+	// otherwise try to find an existing node using the unique table
+	if res, ok := b.nodehash(level, low, high); ok {
+		if _DEBUG {
+			b.uniqueHit++
+		}
+		return res, nil
+	}
+	if _DEBUG {
+		b.uniqueMiss++
+	}
+	// If no existing node, we build one. If there is no available spot
+	// (b.freepos == 0), we try garbage collection and, as a last resort,
+	// resizing the BDD list.
+	var err error
+	if b.freepos == 0 {
+		// We garbage collect unused nodes to try and find spare space.
+		b.gbc(refstack)
+		err = errReset
+		// We also test if we are under the threshold for resizing.
+		if (b.freenum*100)/len(b.nodes) <= b.minfreenodes {
+			err = b.noderesize()
+			if err != errResize {
+				return -1, errMemory
+			}
+		}
+		// Panic if we still have no free positions after all this
+		if b.freepos == 0 {
+			// b.seterror("Unable to resize BDD")
+			return -1, errMemory
+		}
+	}
+	// We can now build the new node in the first available spot
+	b.produced++
+	return b.setnode(level, low, high, 0), err
+}
+
+func (b *tables) gbc(refstack []int) {
+	if _LOGLEVEL > 0 {
+		log.Println("starting GC")
+	}
+
+	// runtime.GC()
+
+	// we append the current stats to the GC history
+	if _DEBUG {
+		b.gcstat.history = append(b.gcstat.history, gcpoint{
+			nodes:            len(b.nodes),
+			freenodes:        b.freenum,
+			setfinalizers:    int(b.gcstat.setfinalizers),
+			calledfinalizers: int(b.gcstat.calledfinalizers),
+		})
+		if _LOGLEVEL > 0 {
+			log.Printf("runtime.GC() reclaimed %d references\n", b.gcstat.calledfinalizers)
+		}
+		b.gcstat.setfinalizers = 0
+		b.gcstat.calledfinalizers = 0
+	} else {
+		b.gcstat.history = append(b.gcstat.history, gcpoint{
+			nodes:     len(b.nodes),
+			freenodes: b.freenum,
+		})
+	}
+	// we mark the nodes in the refstack to avoid collecting them
+	for _, r := range refstack {
+		b.markrec(int(r))
+	}
+	// we also protect nodes with a positive refcount (and therefore also the
+	// ones with a MAXREFCOUNT, such as variables)
+	for k := range b.nodes {
+		if b.nodes[k].refcou > 0 {
+			b.markrec(k)
+		}
+	}
+	b.freepos = 0
+	b.freenum = 0
+	// we do a pass through the nodes list to void the unmarked nodes. After
+	// finishing this pass, b.freepos points to the first free position in
+	// b.nodes, or it is 0 if we found none.
+	for n := len(b.nodes) - 1; n > 1; n-- {
+		if b.ismarked(n) && (b.nodes[n].low != -1) {
+			b.unmarknode(n)
+		} else {
+			b.delnode(b.nodes[n])
+			b.nodes[n].low = -1
+			b.nodes[n].high = b.freepos
+			b.freepos = n
+			b.freenum++
+		}
+	}
+	// we also invalidate the caches
+	// b.cachereset()
+	if _LOGLEVEL > 0 {
+		log.Printf("end GC; freenum: %d\n", b.freenum)
+	}
+}
+
+func (b *tables) noderesize() error {
+	if _LOGLEVEL > 0 {
+		log.Printf("start resize: %d\n", len(b.nodes))
+	}
+	// if b.error != nil {
+	// 	b.seterror("Error before resizing; %s", b.error)
+	// 	return b.error
+	// }
+	oldsize := len(b.nodes)
+	nodesize := len(b.nodes)
+	if (oldsize >= b.maxnodesize) && (b.maxnodesize > 0) {
+		// b.seterror("Cannot resize BDD, already at max capacity (%d nodes)", b.maxnodesize)
+		return errMemory
+	}
+	if oldsize > (math.MaxInt32 >> 1) {
+		nodesize = math.MaxInt32 - 1
+	} else {
+		nodesize = nodesize << 1
+	}
+	if b.maxnodeincrease > 0 && nodesize > (oldsize+b.maxnodeincrease) {
+		nodesize = oldsize + b.maxnodeincrease
+	}
+	if (nodesize > b.maxnodesize) && (b.maxnodesize > 0) {
+		nodesize = b.maxnodesize
+	}
+	if nodesize <= oldsize {
+		// b.seterror("Unable to grow size of BDD (%d nodes)", nodesize)
+		return errMemory
+	}
+
+	tmp := b.nodes
+	b.nodes = make([]huddnode, nodesize)
+	copy(b.nodes, tmp)
+
+	for n := oldsize; n < nodesize; n++ {
+		b.nodes[n].refcou = 0
+		b.nodes[n].level = 0
+		b.nodes[n].low = -1
+		b.nodes[n].high = n + 1
+	}
+	b.nodes[nodesize-1].high = b.freepos
+	b.freepos = oldsize
+	b.freenum += (nodesize - oldsize)
+
+	// b.cacheresize(len(b.nodes))
+
+	if _LOGLEVEL > 0 {
+		log.Printf("end resize: %d\n", len(b.nodes))
+	}
+
+	return errResize
+}
+
+func (b *tables) markrec(n int) {
+	if n < 2 || b.ismarked(n) || (b.nodes[n].low == -1) {
+		return
+	}
+	b.marknode(n)
+	b.markrec(b.nodes[n].low)
+	b.markrec(b.nodes[n].high)
+}
+
+func (b *tables) unmarkall() {
+	for k, v := range b.nodes {
+		if k < 2 || !b.ismarked(k) || (v.low == -1) {
+			continue
+		}
+		b.unmarknode(k)
+	}
+}
```
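A few notes on the mechanics in this vendored file. `retnode` hands callers a `*int` handle rather than a bare node index: the handle gets a finalizer so that, once Go's collector finds it unreachable, the node's reference count can drop and a later `gbc` pass may reclaim the node (entries pinned at `_MAXREFCOUNT`, such as variables, never get one). A reduced sketch of that handle-plus-finalizer pattern, with a toy release closure standing in for rudd's `nodefinalizer`:

```go
package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

// refTable is a toy stand-in for rudd's tables type; it keeps only the
// per-node reference counts that retnode manipulates.
type refTable struct {
	refcou []int32
}

// retain mirrors the shape of retnode: the external handle is a *int,
// and a finalizer releases the table's reference once the Go garbage
// collector decides the handle itself is unreachable.
func (t *refTable) retain(n int) *int {
	x := n
	atomic.AddInt32(&t.refcou[n], 1)
	runtime.SetFinalizer(&x, func(p *int) {
		// Runs on the finalizer goroutine, some time after a collection.
		atomic.AddInt32(&t.refcou[*p], -1)
	})
	return &x
}

func main() {
	t := &refTable{refcou: make([]int32, 4)}
	h := t.retain(2)
	fmt.Println(*h, atomic.LoadInt32(&t.refcou[2])) // 2 1
	h = nil
	_ = h
	runtime.GC() // finalizers are asynchronous; the count drops eventually
}
```

Finalizers are only hints, which is why the table cannot free a node eagerly at that point: actual reclamation waits for `gbc`.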
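`makenode` touches the free list only after the unique-table probe misses. The list is threaded through the node table itself: a slot with `low == -1` is free, its `high` field holds the index of the next free slot, and 0 terminates the chain (slots 0 and 1 are the constant nodes, so 0 can never be a free slot). A self-contained sketch of that intrusive free list, using illustrative names rather than rudd's:

```go
package main

import "fmt"

// slot mimics rudd's convention: low == -1 marks a free slot, and
// high then doubles as the "next free" index.
type slot struct {
	low, high int
}

type table struct {
	slots   []slot
	freepos int // index of the first free slot; 0 means none free
}

// newTable chains every slot except 0 and 1 (reserved for the
// constants) into a free list, as noderesize does for a new segment.
func newTable(n int) *table {
	t := &table{slots: make([]slot, n)}
	for i := 2; i < n; i++ {
		t.slots[i] = slot{low: -1, high: i + 1}
	}
	t.slots[n-1].high = 0 // terminate the chain
	t.freepos = 2
	return t
}

// alloc pops the head of the free list, or reports exhaustion, which
// is the point where makenode would run gbc and possibly resize.
func (t *table) alloc(low, high int) (int, bool) {
	if t.freepos == 0 {
		return 0, false
	}
	n := t.freepos
	t.freepos = t.slots[n].high
	t.slots[n] = slot{low: low, high: high}
	return n, true
}

// free pushes a slot back, as the sweep loop in gbc does.
func (t *table) free(n int) {
	t.slots[n] = slot{low: -1, high: t.freepos}
	t.freepos = n
}

func main() {
	t := newTable(8)
	a, _ := t.alloc(0, 1)
	b, _ := t.alloc(1, 0)
	fmt.Println(a, b, t.freepos) // 2 3 4
	t.free(a)
	fmt.Println(t.freepos) // 2
}
```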
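`gbc` is a classic mark-and-sweep over that table: everything reachable from `refstack` or from a node with a positive `refcou` is marked via `markrec`, and the sweep loop then rebuilds the free list from whatever stayed unmarked. `markrec` recurses through the `low`/`high` children; for illustration, here is the same traversal with an explicit stack (`marked` is a plain bitset standing in for rudd's mark bit, whose real representation lives elsewhere in the package):

```go
package main

import "fmt"

type node struct {
	low, high int
}

// markAll is an explicit-stack rendering of rudd's recursive markrec:
// follow low/high children, skipping constants (indices 0 and 1),
// freed slots (low == -1), and anything already marked.
func markAll(nodes []node, marked []bool, roots []int) {
	stack := append([]int(nil), roots...)
	for len(stack) > 0 {
		n := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if n < 2 || marked[n] || nodes[n].low == -1 {
			continue
		}
		marked[n] = true
		stack = append(stack, nodes[n].low, nodes[n].high)
	}
}

func main() {
	// 0 and 1 are the constants; node 3 reaches node 2; node 4 is garbage.
	nodes := []node{{}, {}, {0, 1}, {2, 1}, {0, 1}}
	marked := make([]bool, len(nodes))
	markAll(nodes, marked, []int{3})
	fmt.Println(marked) // [false false true true false]
}
```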
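Finally, `noderesize`'s growth policy reads more easily as a pure function: double the table, clamp to `math.MaxInt32 - 1`, then to at most `maxnodeincrease` new slots per step, then to the hard `maxnodesize` cap, and fail if no growth remains. A restatement under those assumptions (parameter names mirror the fields above; this is a paraphrase, not rudd's code):

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

var errMemory = errors.New("cannot grow table")

// grow computes the next table size under the same constraints as
// noderesize: doubling, capped per step and overall, and bounded by
// the int32 range. A cap of zero means "no limit".
func grow(oldsize, maxincrease, maxsize int) (int, error) {
	if maxsize > 0 && oldsize >= maxsize {
		return 0, errMemory // already at max capacity
	}
	var newsize int
	if oldsize > math.MaxInt32>>1 {
		newsize = math.MaxInt32 - 1 // stay inside the int32 range
	} else {
		newsize = oldsize << 1
	}
	if maxincrease > 0 && newsize > oldsize+maxincrease {
		newsize = oldsize + maxincrease // limit one step's growth
	}
	if maxsize > 0 && newsize > maxsize {
		newsize = maxsize // never exceed the hard cap
	}
	if newsize <= oldsize {
		return 0, errMemory // no room left to grow
	}
	return newsize, nil
}

func main() {
	fmt.Println(grow(1<<10, 0, 0))     // 2048 <nil>
	fmt.Println(grow(1<<10, 512, 0))   // 1536 <nil>
	fmt.Println(grow(1<<10, 0, 1<<10)) // 0 cannot grow table
}
```

Note also that, judging from `makenode` above, the `errReset`/`errResize` values returned on success paths act as signals to the caller (caches must be cleared or resized and in-flight operations restarted) rather than failures; `errMemory` is the only fatal outcome here.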
