author    mo khan <mo@mokhan.ca>    2025-07-22 17:35:49 -0600
committer mo khan <mo@mokhan.ca>    2025-07-22 17:35:49 -0600
commit    20ef0d92694465ac86b550df139e8366a0a2b4fa (patch)
tree      3f14589e1ce6eb9306a3af31c3a1f9e1af5ed637 /vendor/github.com/dalzilio/rudd
parent    44e0d272c040cdc53a98b9f1dc58ae7da67752e6 (diff)
feat: connect to spicedb
Diffstat (limited to 'vendor/github.com/dalzilio/rudd')
-rw-r--r--  vendor/github.com/dalzilio/rudd/.gitignore    |  16
-rw-r--r--  vendor/github.com/dalzilio/rudd/LICENSE.md    |  21
-rw-r--r--  vendor/github.com/dalzilio/rudd/NOTICE        |  57
-rw-r--r--  vendor/github.com/dalzilio/rudd/README.md     | 187
-rw-r--r--  vendor/github.com/dalzilio/rudd/bdd.go        | 380
-rw-r--r--  vendor/github.com/dalzilio/rudd/bkernel.go    | 267
-rw-r--r--  vendor/github.com/dalzilio/rudd/buddy.go      | 237
-rw-r--r--  vendor/github.com/dalzilio/rudd/cache.go      | 392
-rw-r--r--  vendor/github.com/dalzilio/rudd/config.go     |  94
-rw-r--r--  vendor/github.com/dalzilio/rudd/debug.go      |  19
-rw-r--r--  vendor/github.com/dalzilio/rudd/doc.go        |  53
-rw-r--r--  vendor/github.com/dalzilio/rudd/errors.go     |  36
-rw-r--r--  vendor/github.com/dalzilio/rudd/hkernel.go    | 216
-rw-r--r--  vendor/github.com/dalzilio/rudd/hudd.go       | 331
-rw-r--r--  vendor/github.com/dalzilio/rudd/kernel.go     |  36
-rw-r--r--  vendor/github.com/dalzilio/rudd/nodebug.go    |  10
-rw-r--r--  vendor/github.com/dalzilio/rudd/operations.go | 713
-rw-r--r--  vendor/github.com/dalzilio/rudd/operator.go   |  57
-rw-r--r--  vendor/github.com/dalzilio/rudd/out.dot       |  14
-rw-r--r--  vendor/github.com/dalzilio/rudd/primes.go     |  60
-rw-r--r--  vendor/github.com/dalzilio/rudd/replace.go    |  98
-rw-r--r--  vendor/github.com/dalzilio/rudd/stdio.go      | 124
22 files changed, 3418 insertions, 0 deletions
diff --git a/vendor/github.com/dalzilio/rudd/.gitignore b/vendor/github.com/dalzilio/rudd/.gitignore
new file mode 100644
index 0000000..a319b52
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/.gitignore
@@ -0,0 +1,16 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+.vscode/settings.json
diff --git a/vendor/github.com/dalzilio/rudd/LICENSE.md b/vendor/github.com/dalzilio/rudd/LICENSE.md
new file mode 100644
index 0000000..6c1a2e0
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/LICENSE.md
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Silvano DAL ZILIO
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/dalzilio/rudd/NOTICE b/vendor/github.com/dalzilio/rudd/NOTICE
new file mode 100644
index 0000000..9d7f4e0
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/NOTICE
@@ -0,0 +1,57 @@
+
+Large parts of the code in the RuDD library are based on the original work by
+Jorn Lind-Nielsen on the BuDDy C library. I include the original license for
+this work below, together with a list of the original authors.
+
+While the current implementation of RuDD adds some original work, I expect every
+redistribution to include the present NOTICE and acknowledge that some source
+files and examples have been copied and adapted from the *BuDDy* Binary Decision
+Diagrams Library, Package v2.4, Copyright (C) 1996-2002 by Jorn Lind-Nielsen
+(see <http://buddy.sourceforge.net/>).
+
+==========================================================================
+ *** BuDDy ***
+ Binary Decision Diagrams
+ Library Package v2.4
+--------------------------------------------------------------------------
+ Copyright (C) 1996-2002 by Jorn Lind-Nielsen
+ All rights reserved
+
+ Permission is hereby granted, without written agreement and without
+ license or royalty fees, to use, reproduce, prepare derivative
+ works, distribute, and display this software and its documentation
+ for any purpose, provided that (1) the above copyright notice and
+ the following two paragraphs appear in all copies of the source code
+ and (2) redistributions, including without limitation binaries,
+ reproduce these notices in the supporting documentation. Substantial
+ modifications to this software may be copyrighted by their authors
+ and need not follow the licensing terms described here, provided
+ that the new terms are clearly indicated in all files where they apply.
+
+ IN NO EVENT SHALL JORN LIND-NIELSEN, OR DISTRIBUTORS OF THIS
+ SOFTWARE BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
+ INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS
+ SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE AUTHORS OR ANY OF THE
+ ABOVE PARTIES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ JORN LIND-NIELSEN SPECIFICALLY DISCLAIM ANY WARRANTIES, INCLUDING,
+ BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO
+ OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
+ MODIFICATIONS.
+==========================================================================
+
+List of authors for BuDDy:
+
+Jorn Lind-Nielsen - Original Developer
+Alan Mishchenko
+Gerd Behrmann
+Henrik Hulgaard
+Henrik Reif Andersen
+Jacob Lichtenberg
+Ken Larsen
+Nicola Soranzo
+Nikolaj Bjorner
+Alexandre Duret-Lutz
+Haim Cohen
diff --git a/vendor/github.com/dalzilio/rudd/README.md b/vendor/github.com/dalzilio/rudd/README.md
new file mode 100644
index 0000000..997cfef
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/README.md
@@ -0,0 +1,187 @@
+<!-- PROJECT LOGO -->
+<br />
+
+<p align="center">
+ <a href="https://github.com/dalzilio/rudd">
+ <img src="./docs/rudd1.png" alt="Logo" width="320">
+ </a>
+</p>
+
+# RuDD [![Go Report Card](https://goreportcard.com/badge/github.com/dalzilio/rudd)](https://goreportcard.com/report/github.com/dalzilio/rudd) [![GoDoc](https://godoc.org/github.com/dalzilio/rudd?status.svg)](https://godoc.org/github.com/dalzilio/rudd) [![Release](https://img.shields.io/github/v/release/dalzilio/rudd)](https://github.com/dalzilio/rudd/releases)
+
+RuDD is a library for Binary Decision Diagrams written in pure Go.
+
+## About
+
+RuDD is a Binary Decision Diagram (BDD) library written in pure Go, without the
+need for CGo or any other dependencies. A [BDD](https://en.wikipedia.org/wiki/Binary_decision_diagram) is a data structure
+used to efficiently represent Boolean functions or, equivalently, sets of
+Boolean vectors.
+
+It has nothing to do with Behaviour Driven Development testing!
+
+## Specific use of Go build tags
+
+We provide two possible implementations for BDD that can be selected using build
+tags. The rationale for this unorthodox use of build tags is to avoid the use of
+interfaces, and therefore dynamic dispatch, as well as to favor some automatic
+compiler optimizations (such as code inlining).
+
+Our default implementation (without any build tags) uses a standard Go runtime
+hashmap to encode a "unicity table".
+
+When building your executable with the build tag `buddy` (as in `go build -tags
+buddy mycmd`), the API switches to an implementation that is very close to the
+one in the BuDDy library, based on a specialized data structure that mixes a
+dynamic array with a hash table.
+
+To get access to better statistics about caches and garbage collection, as well as to unlock logging of some operations, you can also compile your executable with the build tag `debug`.
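+
+For example, the same user code can be compiled against either implementation,
+with or without the extra statistics, just by changing the build command
+(reusing the `mycmd` placeholder from above):
+
+```
+go build mycmd                    # default, hashmap-based unicity table
+go build -tags buddy mycmd        # BuDDy-style node table
+go build -tags buddy,debug mycmd  # BuDDy-style node table, with debug statistics
+```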
+
+## Relation with BuDDy and MuDDy
+
+For the most part, RuDD is a direct translation into Go of the
+[BuDDy](http://buddy.sourceforge.net/manual/) C-library developed by Jorn
+Lind-Nielsen. You can find a high-level description of the algorithms and
+data-structures used in this project by looking at ["An Introduction To Binary
+Decision Diagrams"](https://www.cs.utexas.edu/~isil/cs389L/bdd.pdf), a Research
+Report also distributed as part of the BuDDy distribution. The adaptation was
+made easy by the simplicity of its architecture (in a good sense) and the
+legibility of its code.
+
+In many places, the code of RuDD is an almost line-by-line copy of BuDDy
+(including reusing part of the same comments for documenting the code), with a
+few adaptations to follow some Go best practices; we even implement the same
+examples as in the BuDDy distribution for benchmarks and regression testing.
+
+BuDDy is a mature software library that has been used in several projects, with
+performance on par with more complex libraries, such as
+[CUDD](https://davidkebo.com/cudd). You can find a comparative study of the
+performance of several BDD libraries in
+[\[DHJ+2015\]](https://www.tvandijk.nl/pdf/2015setta.pdf).
+
+Like with [MuDDy](https://github.com/kfl/muddy), an ML interface to BuDDy, we
+piggyback on the garbage collection mechanism offered by our host language. We
+take care of BDD resizing and memory management directly in the library, but
+*external* references to BDD nodes made by user code are automatically managed
+by the Go runtime. Unlike MuDDy, we do not provide an interface, but a genuine
+reimplementation of BuDDy. As a consequence, we do not suffer from FFI overheads
+when calling from Go into C, which is one of the major pain points of working
+with Go.
+
+Experience has shown that there is no significant loss of performance when
+using BuDDy from a functional language with garbage collection, compared to
+using C or C++
+[\[L09\]](https://link.springer.com/content/pdf/10.1007%2F978-3-642-03034-5_3.pdf).
+For example, we use MuDDy in the tedd model-checker provided with
+[Tina](http://projects.laas.fr/tina/) (together with other libraries for dealing
+with multi-valued decision diagrams). One of our motivations in this project is
+to see whether we can replicate this experience in Go. Our first experiments
+show very promising results, but we still lack a serious study of the
+performance of our library.
+
+## Installation
+
+```
+go get github.com/dalzilio/rudd
+```
+
+## Overview
+
+The main goal of RuDD is to test the performance of a lightweight BDD library
+directly implemented in Go, with a focus on implementing symbolic model-checking
+tools. At the moment, we provide only a subset of the functionalities defined in
+BuDDy, which is enough for our goals. In particular, we do not provide any
+method for the dynamic reordering of variables. We also lack support for Finite
+Domain Blocks (`fdd`) and Boolean Vectors (`bvec`).
+
+In the future, we plan to add new features to RuDD and to optimize some of its
+internals, for instance with better caching strategies or the use of
+concurrency features. This means that the API could evolve in future releases,
+but no functions should disappear or change significantly.
+
+## Why this name
+
+The library is named after a fresh water fish, the [common
+rudd](https://en.wikipedia.org/wiki/Common_rudd) (*Scardinius
+erythrophthalmus*), or "gardon rouge" in French, that is stronger and more
+resistant that the common roach, with which it is often confused. While it is
+sometimes used as bait, its commercial interest is minor. This is certainly a
+fitting description for our code ! It is also a valid English word ending with
+DD, which is enough to justify our choice.
+
+## References
+
+You may have a look at the documentation for BuDDy (and MuDDy) to get a good
+understanding of how the library can be used.
+
+* [\[An97\]](https://www.cs.utexas.edu/~isil/cs389L/bdd.pdf) Henrik Reif
+ Andersen. *An Introduction to Binary Decision Diagrams*. Lecture Notes for a
+ course on Advanced Algorithms. Technical University of Denmark. 1997.
+
+* [\[L09\]](https://link.springer.com/content/pdf/10.1007%2F978-3-642-03034-5_3.pdf)
+  Ken Friis Larsen. [*A MuDDy Experience – ML Bindings to a BDD
+  Library*](https://link.springer.com/chapter/10.1007/978-3-642-03034-5_3).
+ IFIP Working Conference on Domain-Specific Languages. Springer,
+ 2009.
+
+* [\[DHJ+2015\]](https://www.tvandijk.nl/pdf/2015setta.pdf) Tom van Dijk et al.
+ *A comparative study of BDD packages for probabilistic symbolic model
+ checking.* International Symposium on Dependable Software Engineering:
+ Theories, Tools, and Applications. Springer, 2015.
+
+## Usage
+
+You can find several examples in the `*_test.go` files.
+
+To get access to better statistics about caches and garbage collection, as well
+as to unlock logging of some operations, you can compile your executable with
+the build tag `debug`, for instance with a directive such as `go run -tags debug
+mycmd`.
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+
+	"github.com/dalzilio/rudd"
+)
+
+func main() {
+	// create a new BDD with 6 variables, 10 000 nodes and a cache size of 5 000
+	// (initially); build with the `buddy` tag to use the BuDDy-style implementation
+	bdd, err := rudd.New(6, rudd.Nodesize(10000), rudd.Cachesize(5000))
+	if err != nil {
+		log.Fatal(err)
+	}
+	// n1 == x2 & x3 & x5
+	n1 := bdd.Makeset([]int{2, 3, 5})
+	// n2 == x1 | !x3 | x4
+	n2 := bdd.Or(bdd.Ithvar(1), bdd.NIthvar(3), bdd.Ithvar(4))
+	// n3 == ∃ x2,x3,x5 . (n2 & x3)
+	n3 := bdd.AndExist(n1, n2, bdd.Ithvar(3))
+	fmt.Printf("Number of sat. assignments: %s\n", bdd.Satcount(n3))
+	fmt.Println(bdd.Stats())
+	// you can export a BDD in Graphviz's DOT format
+	bdd.Dot(os.Stdout)
+}
+```
+
+## Dependencies
+
+The library has no dependencies outside of the standard Go library. It uses Go
+modules and has been tested with Go 1.16.
+
+## License
+
+This software is distributed under the [MIT
+License](https://opensource.org/licenses/MIT). A copy of the license agreement
+is found in the [LICENSE](./LICENSE.md) file.
+
+The original C code for BuDDy was released under a very permissive license that
+is included in the accompanying [NOTICE](./NOTICE) file, together with a list of
+the original authors. While the current implementation of RuDD adds some
+original work, I expect every redistribution to include the present NOTICE and
+acknowledge that some source files and examples have been copied and adapted
+from the **BuDDy** Binary Decision Diagrams Library, Package v2.4, Copyright (C)
+1996-2002 by Jorn Lind-Nielsen (see <http://buddy.sourceforge.net/>).
+
+## Authors
+
+* **Silvano DAL ZILIO** - [LAAS/CNRS](https://www.laas.fr/) \ No newline at end of file
diff --git a/vendor/github.com/dalzilio/rudd/bdd.go b/vendor/github.com/dalzilio/rudd/bdd.go
new file mode 100644
index 0000000..853b6a5
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/bdd.go
@@ -0,0 +1,380 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+package rudd
+
+import (
+ "fmt"
+)
+
+// // BDD is an interface implementing the basic operations over Binary Decision
+// // Diagrams.
+// type BDD interface {
+// // Error returns the error status of the BDD. We return an empty string if
+// // there are no errors. Functions that return a Node result will signal an
+// // error by returning nil.
+// Error() string
+
+// // Varnum returns the number of defined variables.
+// Varnum() int
+
+// // Ithvar returns a BDD representing the i'th variable on success. The
+// // requested variable must be in the range [0..Varnum).
+// Ithvar(i int) Node
+
+// // NIthvar returns a bdd representing the negation of the i'th variable on
+// // success. See *ithvar* for further info.
+// NIthvar(i int) Node
+
+// // Low returns the false branch of a BDD or nil if there is an error.
+// Low(n Node) Node
+
+// // High returns the true branch of a BDD.
+// High(n Node) Node
+
+// // Makeset returns a node corresponding to the conjunction (the cube) of all
+// // the variables in varset, in their positive form. It is such that
+// // scanset(Makeset(a)) == a. It returns nil if one of the variables is
+// // outside the scope of the BDD (see documentation for function *Ithvar*).
+// Makeset(varset []int) Node
+
+// // Scanset returns the set of variables found when following the high branch
+// // of node n. This is the dual of function Makeset. The result may be nil if
+// // there is an error and it is an empty slice if the set is empty.
+// Scanset(n Node) []int
+
+// // Not returns the negation (!n) of expression n.
+// Not(n Node) Node
+
+// // Apply performs all of the basic binary operations on BDD nodes, such as
+// // AND, OR etc.
+// Apply(left Node, right Node, op Operator) Node
+
+// // Ite, short for if-then-else operator, computes the BDD for the expression
+// // [(f & g) | (!f & h)] more efficiently than doing the three operations
+// // separately.
+// Ite(f, g, h Node) Node
+
+// // Exist returns the existential quantification of n for the variables in
+// // varset, where varset is a node built with a method such as Makeset.
+// Exist(n, varset Node) Node
+
+// // AppEx applies the binary operator *op* on the two operands left and right
+// // then performs an existential quantification over the variables in varset,
+// // where varset is a node computed with an operation such as Makeset.
+// AppEx(left Node, right Node, op Operator, varset Node) Node
+
+// // Replace takes a renamer and computes the result of n after replacing old
+// // variables with new ones. See type Renamer.
+// Replace(n Node, r Replacer) Node
+
+// // Satcount computes the number of satisfying variable assignments for the
+// // function denoted by n. We return a result using arbitrary-precision
+// // arithmetic to avoid possible overflows. The result is zero (and we set
+// // the error flag of b) if there is an error.
+// Satcount(n Node) *big.Int
+
+// // Allsat Iterates through all legal variable assignments for n and calls
+// // the function f on each of them. We pass an int slice of length varnum to
+// // f where each entry is either 0 if the variable is false, 1 if it is true,
+// // and -1 if it is a don't care. We stop and return an error if f returns an
+// // error at some point.
+// Allsat(n Node, f func([]int) error) error
+
+// // Allnodes is similar to Allsat but iterates over all the nodes accessible
+// // from one of the parameters in n (or all the active nodes if n is absent).
+// // Function f takes the id, level, and id's of the low and high successors
+// // of each node. The two constant nodes (True and False) have always the id
+// // 1 and 0 respectively.
+// Allnodes(f func(id, level, low, high int) error, n ...Node) error
+
+// // Stats returns information about the BDD
+// Stats() string
+// }
+
+// // implementation is an unexported interface implemented by different BDD
+// // structures
+// type implementation interface {
+// // retnode creates a Node for external use and sets a finalizer on it so
+// // that the runtime can reclaim the ressource during GC.
+// retnode(n int) Node
+
+// // makenode is the only method for inserting a new BDD node
+// makenode(level int32, low, high int, refstack []int) (int, error)
+
+// // size returns the allocated size for the node table
+// size() int
+
+// // level return s the level of a node
+// level(n int) int32
+
+// low(n int) int
+
+// high(n int) int
+
+// ismarked(n int) bool
+
+// marknode(n int)
+
+// unmarknode(n int)
+
+// // allnodes applies function f over all the nodes accessible in a node table
+// allnodes(f func(id, level, low, high int) error) error
+
+// // allnodesfrom applies function f over all the nodes accessible from the nodes in n
+// allnodesfrom(f func(id, level, low, high int) error, n []Node) error
+
+// // stats return a description of the state of the node table implementation
+// stats() string
+// }
+
+// Node is a reference to an element of a BDD. It represents the atomic unit of
+// interactions and computations within a BDD.
+type Node *int
+
+// inode returns a Node for known nodes, such as variables, that do not need to
+// increase their reference count.
+func inode(n int) Node {
+ x := n
+ return &x
+}
+
+var bddone Node = inode(1)
+
+var bddzero Node = inode(0)
+
+// BDD is the type of Binary Decision Diagrams. It abstracts and encapsulates
+// the internal states of a BDD; such as caches, or the internal node and
+// unicity tables for example. We propose multiple implementations (two at the
+// moment) all based on approaches where we use integers as the key for Nodes.
+type BDD struct {
+ varnum int32 // Number of BDD variables.
+ varset [][2]int // Set of variables used for Ithvar and NIthvar: we have a pair for each variable for its positive and negative occurrence
+ refstack []int // Internal node reference stack, used to avoid collecting nodes while they are being processed.
+	error // Error status: we use nil Nodes to signal a problem and store the error in this field. This helps chain operations together.
+ caches // Set of caches used for the operations in the BDD
+ *tables // Underlying struct that encapsulates the list of nodes
+}
+
+// Varnum returns the number of defined variables.
+func (b *BDD) Varnum() int {
+ return int(b.varnum)
+}
+
+func (b *BDD) makenode(level int32, low, high int) int {
+ res, err := b.tables.makenode(level, low, high, b.refstack)
+ if err == nil {
+ return res
+ }
+ if err == errReset {
+ b.cachereset()
+ return res
+ }
+ if err == errResize {
+ b.cacheresize(b.size())
+ return res
+ }
+ return res
+}
+
+// caches is a collection of caches used for operations
+type caches struct {
+ *applycache // Cache for apply results
+ *itecache // Cache for ITE results
+ *quantcache // Cache for exist/forall results
+ *appexcache // Cache for AppEx results
+ *replacecache // Cache for Replace results
+}
+
+// initref is part of three private functions to manipulate the refstack; used
+// to prevent nodes that are currently being built (e.g. transient nodes built
+// during an apply) to be reclaimed during GC.
+func (b *BDD) initref() {
+ b.refstack = b.refstack[:0]
+}
+
+func (b *BDD) pushref(n int) int {
+ b.refstack = append(b.refstack, n)
+ return n
+}
+
+func (b *BDD) popref(a int) {
+ b.refstack = b.refstack[:len(b.refstack)-a]
+}
+
+// gcstat stores status information about garbage collections. We use a stack
+// (slice) of objects to record the sequence of GC during a computation.
+type gcstat struct {
+ setfinalizers uint64 // Total number of external references to BDD nodes
+ calledfinalizers uint64 // Number of external references that were freed
+	history []gcpoint // Snapshot of GC stats at each occurrence
+}
+
+type gcpoint struct {
+ nodes int // Total number of allocated nodes in the nodetable
+ freenodes int // Number of free nodes in the nodetable
+ setfinalizers int // Total number of external references to BDD nodes
+ calledfinalizers int // Number of external references that were freed
+}
+
+// checkptr performs a sanity check prior to accessing a node and returns an
+// error code if the check fails.
+func (b *BDD) checkptr(n Node) error {
+	switch {
+	case n == nil:
+		b.seterror("Illegal access to node (nil value)")
+		return b.error
+	case (*n < 0) || (*n >= b.size()):
+		b.seterror("Illegal access to node %d", *n)
+		return b.error
+	case (*n >= 2) && (b.low(*n) == -1):
+		b.seterror("Illegal access to node %d", *n)
+		return b.error
+	}
+	return nil
+}
+
+// Ithvar returns a BDD representing the i'th variable on success (the
+// expression xi); otherwise we set the error status in the BDD and return the
+// nil node. The requested variable must be in the range [0..Varnum).
+func (b *BDD) Ithvar(i int) Node {
+ if (i < 0) || (int32(i) >= b.varnum) {
+ return b.seterror("Unknown variable used (%d) in call to ithvar", i)
+ }
+ // we do not need to reference count variables
+ return inode(b.varset[i][0])
+}
+
+// NIthvar returns a node representing the negation of the i'th variable on
+// success (the expression !xi), otherwise the nil node. See *ithvar* for
+// further info.
+func (b *BDD) NIthvar(i int) Node {
+ if (i < 0) || (int32(i) >= b.varnum) {
+ return b.seterror("Unknown variable used (%d) in call to nithvar", i)
+ }
+ // we do not need to reference count variables
+ return inode(b.varset[i][1])
+}
+
+// Label returns the variable (index) corresponding to node n in the BDD. We set
+// the BDD to its error state and return -1 if we try to access a constant node.
+func (b *BDD) Label(n Node) int {
+ if b.checkptr(n) != nil {
+ b.seterror("Illegal access to node %d in call to Label", n)
+ return -1
+ }
+ if *n < 2 {
+ b.seterror("Try to access label of constant node")
+ return -1
+ }
+ return int(b.level(*n))
+}
+
+// Low returns the false (or low) branch of a BDD. We return nil and set the
+// error flag in the BDD if there is an error.
+func (b *BDD) Low(n Node) Node {
+ if b.checkptr(n) != nil {
+ return b.seterror("Illegal access to node %d in call to Low", n)
+ }
+ return b.retnode(b.low(*n))
+}
+
+// High returns the true (or high) branch of a BDD. We return nil and set the
+// error flag in the BDD if there is an error.
+func (b *BDD) High(n Node) Node {
+ if b.checkptr(n) != nil {
+ return b.seterror("Illegal access to node %d in call to High", n)
+ }
+ return b.retnode(b.high(*n))
+}
+
+// And returns the logical 'and' of a sequence of nodes or, equivalently,
+// computes the intersection of a sequence of Boolean vectors.
+func (b *BDD) And(n ...Node) Node {
+ if len(n) == 1 {
+ return n[0]
+ }
+ if len(n) == 0 {
+ return bddone
+ }
+ return b.Apply(n[0], b.And(n[1:]...), OPand)
+}
+
+// Or returns the logical 'or' of a sequence of nodes or, equivalently, computes
+// the union of a sequence of Boolean vectors.
+func (b *BDD) Or(n ...Node) Node {
+ if len(n) == 1 {
+ return n[0]
+ }
+ if len(n) == 0 {
+ return bddzero
+ }
+ return b.Apply(n[0], b.Or(n[1:]...), OPor)
+}
+
+// Imp returns the logical 'implication' between two BDDs.
+func (b *BDD) Imp(n1, n2 Node) Node {
+ return b.Apply(n1, n2, OPimp)
+}
+
+// Equiv returns the logical 'bi-implication' between two BDDs.
+func (b *BDD) Equiv(n1, n2 Node) Node {
+ return b.Apply(n1, n2, OPbiimp)
+}
+
+// Equal tests equivalence between nodes.
+func (b *BDD) Equal(n1, n2 Node) bool {
+ if n1 == n2 {
+ return true
+ }
+ if n1 == nil || n2 == nil {
+ return false
+ }
+ return *n1 == *n2
+}
+
+// AndExist returns the "relational composition" of two nodes with respect to
+// varset, meaning the result of (∃ varset . n1 & n2).
+func (b *BDD) AndExist(varset, n1, n2 Node) Node {
+ return b.AppEx(n1, n2, OPand, varset)
+}
+
+// True returns the constant true BDD (a node pointing to the value 1). Our
+// implementation ensures that this pointer is unique. Hence two successive calls
+// to True should return the same node.
+func (b *BDD) True() Node {
+ return bddone
+}
+
+// False returns the constant false BDD (a node pointing to the value 0).
+func (b *BDD) False() Node {
+ return bddzero
+}
+
+// From returns a (constant) Node from a boolean value. We return the (BDD)
+// value True if v is true and False otherwise.
+func (b *BDD) From(v bool) Node {
+ if v {
+ return bddone
+ }
+ return bddzero
+}
+
+// Stats returns information about the BDD. It is possible to print more
+// information about the caches and memory footprint of the BDD by compiling
+// your executable with the build tag 'debug'.
+func (b *BDD) Stats() string {
+ res := "==============\n"
+ res += fmt.Sprintf("Varnum: %d\n", b.varnum)
+ res += b.stats()
+ if _DEBUG {
+ res += "==============\n"
+ res += b.applycache.String()
+ res += b.itecache.String()
+ res += b.quantcache.String()
+ res += b.appexcache.String()
+ res += b.replacecache.String()
+ }
+ return res
+}
diff --git a/vendor/github.com/dalzilio/rudd/bkernel.go b/vendor/github.com/dalzilio/rudd/bkernel.go
new file mode 100644
index 0000000..4e55046
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/bkernel.go
@@ -0,0 +1,267 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+// +build buddy
+
+package rudd
+
+import (
+ "log"
+ "math"
+ "runtime"
+ "sync/atomic"
+)
+
+// retnode creates a Node for external use and sets a finalizer on it so that we
+// can reclaim the resource during GC.
+func (b *tables) retnode(n int) Node {
+ if n < 0 || n > len(b.nodes) {
+ if _DEBUG {
+ log.Panicf("unexpected error; b.retnode(%d) not valid\n", n)
+ }
+ return nil
+ }
+ if n == 0 {
+ return bddzero
+ }
+ if n == 1 {
+ return bddone
+ }
+ x := n
+ if b.nodes[n].refcou < _MAXREFCOUNT {
+ b.nodes[n].refcou++
+ runtime.SetFinalizer(&x, b.nodefinalizer)
+ if _DEBUG {
+ atomic.AddUint64(&(b.setfinalizers), 1)
+ if _LOGLEVEL > 2 {
+ log.Printf("inc refcou %d\n", n)
+ }
+ }
+ }
+ return &x
+}
+
+func (b *tables) makenode(level int32, low, high int, refstack []int) (int, error) {
+ if _DEBUG {
+ b.uniqueAccess++
+ }
+	// check whether children are equal or there is an error
+ if low == high {
+ return low, nil
+ }
+ // otherwise try to find an existing node using the hash and next fields
+ hash := b.nodehash(level, low, high)
+ res := b.nodes[hash].hash
+ for res != 0 {
+ if b.nodes[res].level == level && b.nodes[res].low == low && b.nodes[res].high == high {
+ if _DEBUG {
+ b.uniqueHit++
+ }
+ return res, nil
+ }
+ res = b.nodes[res].next
+ if _DEBUG {
+ b.uniqueChain++
+ }
+ }
+ if _DEBUG {
+ b.uniqueMiss++
+ }
+ // If no existing node, we build one. If there is no available spot
+ // (b.freepos == 0), we try garbage collection and, as a last resort,
+ // resizing the BDD list.
+ var err error
+ if b.freepos == 0 {
+ // We garbage collect unused nodes to try and find spare space.
+ b.gbc(refstack)
+ err = errReset
+		// We also test whether we are under the threshold for resizing.
+ if (b.freenum*100)/len(b.nodes) <= b.minfreenodes {
+ err = b.noderesize()
+ if err != errResize {
+ return -1, errMemory
+ }
+ hash = b.nodehash(level, low, high)
+ }
+ // Panic if we still have no free positions after all this
+ if b.freepos == 0 {
+ // b.seterror("Unable to resize BDD")
+ return -1, errMemory
+ }
+ }
+ // We can now build the new node in the first available spot
+ res = b.freepos
+ b.freepos = b.nodes[b.freepos].next
+ b.freenum--
+ b.produced++
+ b.nodes[res].level = level
+ b.nodes[res].low = low
+ b.nodes[res].high = high
+ b.nodes[res].next = b.nodes[hash].hash
+ b.nodes[hash].hash = res
+ return res, err
+}
+
+func (b *tables) noderesize() error {
+ if _LOGLEVEL > 0 {
+ log.Printf("start resize: %d\n", len(b.nodes))
+ }
+ // if b.error != nil {
+ // b.seterror("Error before resizing; %s", b.error)
+ // return b.error
+ // }
+ oldsize := len(b.nodes)
+ nodesize := len(b.nodes)
+ if (oldsize >= b.maxnodesize) && (b.maxnodesize > 0) {
+ // b.seterror("Cannot resize BDD, already at max capacity (%d nodes)", b.maxnodesize)
+ return errMemory
+ }
+ if oldsize > (math.MaxInt32 >> 1) {
+ nodesize = math.MaxInt32 - 1
+ } else {
+ nodesize = nodesize << 1
+ }
+ if b.maxnodeincrease > 0 && nodesize > (oldsize+b.maxnodeincrease) {
+ nodesize = oldsize + b.maxnodeincrease
+ }
+ if (nodesize > b.maxnodesize) && (b.maxnodesize > 0) {
+ nodesize = b.maxnodesize
+ }
+ nodesize = primeLte(nodesize)
+ if nodesize <= oldsize {
+ // b.seterror("Unable to grow size of BDD (%d nodes)", nodesize)
+ return errMemory
+ }
+
+ tmp := b.nodes
+ b.nodes = make([]buddynode, nodesize)
+ copy(b.nodes, tmp)
+
+ for n := 0; n < oldsize; n++ {
+ b.nodes[n].hash = 0
+ }
+ for n := oldsize; n < nodesize; n++ {
+ b.nodes[n].refcou = 0
+ b.nodes[n].hash = 0
+ b.nodes[n].level = 0
+ b.nodes[n].low = -1
+ b.nodes[n].next = n + 1
+ }
+ b.nodes[nodesize-1].next = b.freepos
+ b.freepos = oldsize
+ b.freenum += (nodesize - oldsize)
+
+ // We recompute the hashes since nodesize is modified.
+ b.freepos = 0
+ b.freenum = 0
+ for n := nodesize - 1; n > 1; n-- {
+ if b.nodes[n].low != -1 {
+ hash := b.ptrhash(n)
+ b.nodes[n].next = b.nodes[hash].hash
+ b.nodes[hash].hash = n
+ } else {
+ b.nodes[n].next = b.freepos
+ b.freepos = n
+ b.freenum++
+ }
+ }
+
+ if _LOGLEVEL > 0 {
+ log.Printf("end resize: %d\n", len(b.nodes))
+ }
+ // b.cacheresize(len(b.nodes))
+ return errResize
+}
+
+// gbc is the garbage collector called for reclaiming memory, inside a call to
+// makenode, when there are no free positions available. Allocated nodes that
+// are not reclaimed do not move.
+func (b *tables) gbc(refstack []int) {
+ if _LOGLEVEL > 0 {
+ log.Println("starting GC")
+ }
+
+ // We could explicitly ask the system to run its GC so that we can
+ // decrement the ref counts of Nodes that had an external reference. This is
+ // blocking. Frequent GC is time consuming, but with fewer GC we can
+ // experience more resizing events.
+ //
+ // FIXME: maybe add a gotask that does a runtime GC after a few resizing
+ // and/or a fixed time if we have too many gbc
+ //
+ // runtime.GC()
+
+ // we append the current stats to the GC history
+ if _DEBUG {
+ b.gcstat.history = append(b.gcstat.history, gcpoint{
+ nodes: len(b.nodes),
+ freenodes: b.freenum,
+ setfinalizers: int(b.gcstat.setfinalizers),
+ calledfinalizers: int(b.gcstat.calledfinalizers),
+ })
+ if _LOGLEVEL > 0 {
+ log.Printf("runtime.GC() reclaimed %d references\n", b.gcstat.calledfinalizers)
+ }
+ b.gcstat.setfinalizers = 0
+ b.gcstat.calledfinalizers = 0
+ } else {
+ b.gcstat.history = append(b.gcstat.history, gcpoint{
+ nodes: len(b.nodes),
+ freenodes: b.freenum,
+ })
+ }
+ // we mark the nodes in the refstack to avoid collecting them
+ for _, r := range refstack {
+ b.markrec(int(r))
+ }
+ // we also protect nodes with a positive refcount (and therefore also the
+	// ones with a MAXREFCOUNT, such as variables)
+ for k := range b.nodes {
+ if b.nodes[k].refcou > 0 {
+ b.markrec(k)
+ }
+ b.nodes[k].hash = 0
+ }
+ b.freepos = 0
+ b.freenum = 0
+ // we do a pass through the nodes list to update the hash chains and void
+ // the unmarked nodes. After finishing this pass, b.freepos points to the
+ // first free position in b.nodes, or it is 0 if we found none.
+ for n := len(b.nodes) - 1; n > 1; n-- {
+ if b.ismarked(n) && (b.nodes[n].low != -1) {
+ b.unmarknode(n)
+ hash := b.ptrhash(int(n))
+ b.nodes[n].next = b.nodes[hash].hash
+ b.nodes[hash].hash = int(n)
+ } else {
+ b.nodes[n].low = -1
+ b.nodes[n].next = b.freepos
+ b.freepos = n
+ b.freenum++
+ }
+ }
+ // we also invalidate the caches
+ // b.cachereset()
+ if _LOGLEVEL > 0 {
+ log.Printf("end GC; freenum: %d\n", b.freenum)
+ }
+}
+
+func (b *tables) markrec(n int) {
+ if n < 2 || b.ismarked(n) || (b.nodes[n].low == -1) {
+ return
+ }
+ b.marknode(n)
+ b.markrec(b.nodes[n].low)
+ b.markrec(b.nodes[n].high)
+}
+
+func (b *tables) unmarkall() {
+ for k, v := range b.nodes {
+ if k < 2 || !b.ismarked(k) || (v.low == -1) {
+ continue
+ }
+ b.unmarknode(k)
+ }
+}
diff --git a/vendor/github.com/dalzilio/rudd/buddy.go b/vendor/github.com/dalzilio/rudd/buddy.go
new file mode 100644
index 0000000..df7d0df
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/buddy.go
@@ -0,0 +1,237 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+// +build buddy
+
+package rudd
+
+import (
+ "fmt"
+ "log"
+ "sync/atomic"
+ "unsafe"
+)
+
+// tables is used with the build tag buddy and corresponds to Binary Decision
+// Diagrams based on the data structures and algorithms found in the BuDDy
+// library.
+type tables struct {
+ nodes []buddynode // List of all the BDD nodes. Constants are always kept at index 0 and 1
+ freenum int // Number of free nodes
+ freepos int // First free node
+ produced int // Total number of new nodes ever produced
+ nodefinalizer interface{} // Finalizer used to decrement the ref count of external references
+ uniqueAccess int // accesses to the unique node table
+ uniqueChain int // iterations through the cache chains in the unique node table
+	uniqueHit int // entries actually found in the unique node table
+	uniqueMiss int // entries not found in the unique node table
+ gcstat // Information about garbage collections
+ configs // Configurable parameters
+}
+
+type buddynode struct {
+ refcou int32 // Count the number of external references
+ level int32 // Order of the variable in the BDD
+ low int // Reference to the false branch
+ high int // Reference to the true branch
+ hash int // Index where to (possibly) find node with this hash value
+ next int // Next index to check in case of a collision, 0 if last
+}
+
+func (b *tables) ismarked(n int) bool {
+ return (b.nodes[n].level & 0x200000) != 0
+}
+
+func (b *tables) marknode(n int) {
+ b.nodes[n].level = b.nodes[n].level | 0x200000
+}
+
+func (b *tables) unmarknode(n int) {
+ b.nodes[n].level = b.nodes[n].level & 0x1FFFFF
+}
+
+// The hash function for nodes is #(level, low, high)
+
+func (b *tables) ptrhash(n int) int {
+ return _TRIPLE(int(b.nodes[n].level), b.nodes[n].low, b.nodes[n].high, len(b.nodes))
+}
+
+func (b *tables) nodehash(level int32, low, high int) int {
+ return _TRIPLE(int(level), low, high, len(b.nodes))
+}
+
+// New returns a new BDD based on the implementation selected with the build
+// tag; meaning the 'Hudd'-style BDD by default (based on the standard runtime
+// hashmap) or a 'BuDDy'-style BDD if tags buddy is set. Parameter varnum is the
+// number of variables in the BDD.
+//
+// It is possible to set optional (configuration) parameters, such as the size
+// of the initial node table (Nodesize) or the size for caches (Cachesize),
+// using configs functions. The initial number of nodes is not critical since
+// the table will be resized whenever there are too few nodes left after a
+// garbage collection. But it does have some impact on the efficiency of the
+// operations. We return a nil value if there is an error while creating the
+// BDD.
+func New(varnum int, options ...func(*configs)) (*BDD, error) {
+ b := &BDD{}
+ if (varnum < 1) || (varnum > int(_MAXVAR)) {
+ b.seterror("bad number of variable (%d)", varnum)
+ return nil, b.error
+ }
+ config := makeconfigs(varnum)
+ for _, f := range options {
+ f(config)
+ }
+ b.varnum = int32(varnum)
+ if _LOGLEVEL > 0 {
+ log.Printf("set varnum to %d\n", b.varnum)
+ }
+ b.varset = make([][2]int, varnum)
+ // We also initialize the refstack.
+ b.refstack = make([]int, 0, 2*varnum+4)
+ b.initref()
+ b.error = nil
+ impl := &tables{}
+ impl.minfreenodes = config.minfreenodes
+ impl.maxnodeincrease = config.maxnodeincrease
+ nodesize := primeGte(config.nodesize)
+ impl.nodes = make([]buddynode, nodesize)
+ for k := range impl.nodes {
+ impl.nodes[k] = buddynode{
+ refcou: 0,
+ level: 0,
+ low: -1,
+ high: 0,
+ hash: 0,
+ next: k + 1,
+ }
+ }
+ impl.nodes[nodesize-1].next = 0
+ impl.nodes[0].refcou = _MAXREFCOUNT
+ impl.nodes[1].refcou = _MAXREFCOUNT
+ impl.nodes[0].low = 0
+ impl.nodes[0].high = 0
+ impl.nodes[1].low = 1
+ impl.nodes[1].high = 1
+ impl.nodes[0].level = int32(config.varnum)
+ impl.nodes[1].level = int32(config.varnum)
+ impl.freepos = 2
+ impl.freenum = nodesize - 2
+ impl.gcstat.history = []gcpoint{}
+ impl.nodefinalizer = func(n *int) {
+ if _DEBUG {
+ atomic.AddUint64(&(impl.gcstat.calledfinalizers), 1)
+ if _LOGLEVEL > 2 {
+ log.Printf("dec refcou %d\n", *n)
+ }
+ }
+ impl.nodes[*n].refcou--
+ }
+ for k := 0; k < config.varnum; k++ {
+ v0, _ := impl.makenode(int32(k), 0, 1, nil)
+ if v0 < 0 {
+ b.seterror("cannot allocate new variable %d in setVarnum", k)
+ return nil, b.error
+ }
+ impl.nodes[v0].refcou = _MAXREFCOUNT
+ b.pushref(v0)
+ v1, _ := impl.makenode(int32(k), 1, 0, nil)
+ if v1 < 0 {
+ b.seterror("cannot allocate new variable %d in setVarnum", k)
+ return nil, b.error
+ }
+ impl.nodes[v1].refcou = _MAXREFCOUNT
+ b.popref(1)
+ b.varset[k] = [2]int{v0, v1}
+ }
+ b.tables = impl
+ b.cacheinit(config)
+ return b, nil
+}
+
+func (b *tables) size() int {
+ return len(b.nodes)
+}
+
+func (b *tables) level(n int) int32 {
+ return b.nodes[n].level
+}
+
+func (b *tables) low(n int) int {
+ return b.nodes[n].low
+}
+
+func (b *tables) high(n int) int {
+ return b.nodes[n].high
+}
+
+func (b *tables) allnodesfrom(f func(id, level, low, high int) error, n []Node) error {
+ for _, v := range n {
+ b.markrec(*v)
+ }
+ // if err := f(0, int(b.nodes[0].level), 0, 0); err != nil {
+ // b.unmarkall()
+ // return err
+ // }
+ // if err := f(1, int(b.nodes[1].level), 1, 1); err != nil {
+ // b.unmarkall()
+ // return err
+ // }
+ for k := range b.nodes {
+ if b.ismarked(k) {
+ b.unmarknode(k)
+ if err := f(k, int(b.nodes[k].level), b.nodes[k].low, b.nodes[k].high); err != nil {
+ b.unmarkall()
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (b *tables) allnodes(f func(id, level, low, high int) error) error {
+ // if err := f(0, int(b.nodes[0].level), 0, 0); err != nil {
+ // return err
+ // }
+ // if err := f(1, int(b.nodes[1].level), 1, 1); err != nil {
+ // return err
+ // }
+ for k, v := range b.nodes {
+ if v.low != -1 {
+ if err := f(k, int(v.level), v.low, v.high); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// Stats returns information about the BDD
+func (b *tables) stats() string {
+ res := "Impl.: BuDDy\n"
+ res += fmt.Sprintf("Allocated: %d (%s)\n", len(b.nodes), humanSize(len(b.nodes), unsafe.Sizeof(buddynode{})))
+ res += fmt.Sprintf("Produced: %d\n", b.produced)
+ r := (float64(b.freenum) / float64(len(b.nodes))) * 100
+ res += fmt.Sprintf("Free: %d (%.3g %%)\n", b.freenum, r)
+ res += fmt.Sprintf("Used: %d (%.3g %%)\n", len(b.nodes)-b.freenum, (100.0 - r))
+ res += "==============\n"
+ res += fmt.Sprintf("# of GC: %d\n", len(b.gcstat.history))
+ if _DEBUG {
+ allocated := int(b.gcstat.setfinalizers)
+ reclaimed := int(b.gcstat.calledfinalizers)
+ for _, g := range b.gcstat.history {
+ allocated += g.setfinalizers
+ reclaimed += g.calledfinalizers
+ }
+ res += fmt.Sprintf("Ext. refs: %d\n", allocated)
+ res += fmt.Sprintf("Reclaimed: %d\n", reclaimed)
+ res += "==============\n"
+ res += fmt.Sprintf("Unique Access: %d\n", b.uniqueAccess)
+ res += fmt.Sprintf("Unique Chain: %d\n", b.uniqueChain)
+ res += fmt.Sprintf("Unique Hit: %d (%.1f%% + %.1f%%)\n", b.uniqueHit, (float64(b.uniqueHit)*100)/float64(b.uniqueAccess),
+ (float64(b.uniqueAccess-b.uniqueMiss-b.uniqueHit)*100)/float64(b.uniqueAccess))
+ res += fmt.Sprintf("Unique Miss: %d\n", b.uniqueMiss)
+ }
+ return res
+}
diff --git a/vendor/github.com/dalzilio/rudd/cache.go b/vendor/github.com/dalzilio/rudd/cache.go
new file mode 100644
index 0000000..cdb727c
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/cache.go
@@ -0,0 +1,392 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+package rudd
+
+import (
+ "fmt"
+ "math"
+ "unsafe"
+)
+
+// Hash functions
+
+func _TRIPLE(a, b, c, len int) int {
+ return int(_PAIR(c, _PAIR(a, b, len), len))
+}
+
+// _PAIR is a mapping function that maps (bijectively) a pair of integers (a, b)
+// into a unique integer, then casts it into a value in the interval [0..len)
+// using a modulo operation.
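+// For example, _PAIR(2, 3, 100) = ((2+3)*(2+3+1)/2 + 2) % 100 = 17.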
+func _PAIR(a, b, len int) int {
+ ua := uint64(a)
+ ub := uint64(b)
+ return int(((((ua + ub) * (ua + ub + 1)) / 2) + (ua)) % uint64(len))
+}
+
+// Hash value modifiers for replace/compose
+const cacheidREPLACE int = 0x0
+
+// const cacheid_COMPOSE int = 0x1
+// const cacheid_VECCOMPOSE int = 0x2
+
+// Hash value modifiers for quantification
+const cacheidEXIST int = 0x0
+const cacheidAPPEX int = 0x3
+
+// const cacheid_FORALL int = 0x1
+// const cacheid_UNIQUE int = 0x2
+// const cacheid_APPAL int = 0x4
+// const cacheid_APPUN int = 0x5
+
+type data4n struct {
+ res int
+ a int
+ b int
+ c int
+}
+
+type data4ncache struct {
+ ratio int
+ opHit int // entries found in the caches
+ opMiss int // entries not found in the caches
+ table []data4n
+}
+
+func (bc *data4ncache) init(size, ratio int) {
+ size = primeGte(size)
+ bc.table = make([]data4n, size)
+ bc.ratio = ratio
+ bc.reset()
+}
+
+func (bc *data4ncache) resize(size int) {
+ if bc.ratio > 0 {
+ size = primeGte((size * bc.ratio) / 100)
+ bc.table = make([]data4n, size)
+ }
+ bc.reset()
+}
+
+func (bc *data4ncache) reset() {
+ for k := range bc.table {
+ bc.table[k].a = -1
+ }
+}
+
+// cache3n is used for caching replace operations
+type data3ncache struct {
+ ratio int
+ opHit int // entries found in the replace cache
+ opMiss int // entries not found in the replace cache
+ table []data3n
+}
+
+type data3n struct {
+ res int
+ a int
+ c int
+}
+
+func (bc *data3ncache) init(size, ratio int) {
+ size = primeGte(size)
+ bc.table = make([]data3n, size)
+ bc.ratio = ratio
+ bc.reset()
+}
+
+func (bc *data3ncache) resize(size int) {
+ if bc.ratio > 0 {
+ size = primeGte((size * bc.ratio) / 100)
+ bc.table = make([]data3n, size)
+ }
+ bc.reset()
+}
+
+func (bc *data3ncache) reset() {
+ for k := range bc.table {
+ bc.table[k].a = -1
+ }
+}
+
+// Setup and shutdown
+
+func (b *BDD) cacheinit(c *configs) {
+ size := 10000
+ if c.cachesize != 0 {
+ size = c.cachesize
+ }
+ size = primeGte(size)
+ b.applycache = &applycache{}
+ b.applycache.init(size, c.cacheratio)
+ b.itecache = &itecache{}
+ b.itecache.init(size, c.cacheratio)
+ b.quantcache = &quantcache{}
+ b.quantcache.init(size, c.cacheratio)
+ b.quantset = make([]int32, b.varnum)
+ b.quantsetID = 0
+ b.appexcache = &appexcache{}
+ b.appexcache.init(size, c.cacheratio)
+ b.replacecache = &replacecache{}
+ b.replacecache.init(size, c.cacheratio)
+}
+
+func (b *BDD) cachereset() {
+ b.applycache.reset()
+ b.itecache.reset()
+ b.quantcache.reset()
+ b.appexcache.reset()
+ b.replacecache.reset()
+}
+
+func (b *BDD) cacheresize(nodesize int) {
+ b.applycache.resize(nodesize)
+ b.itecache.resize(nodesize)
+ b.quantcache.resize(nodesize)
+ b.appexcache.resize(nodesize)
+ b.replacecache.resize(nodesize)
+}
+
+//
+// Quantification Cache
+//
+
+// quantset2cache takes a variable list, similar to the ones generated with
+// Makeset, and sets the variables in the quantification cache.
+func (b *BDD) quantset2cache(n int) error {
+ if n < 2 {
+ b.seterror("Illegal variable (%d) in varset to cache", n)
+ return b.error
+ }
+ b.quantsetID++
+ if b.quantsetID == math.MaxInt32 {
+ b.quantset = make([]int32, b.varnum)
+ b.quantsetID = 1
+ }
+ for i := n; i > 1; i = b.high(i) {
+ b.quantset[b.level(i)] = b.quantsetID
+ b.quantlast = b.level(i)
+ }
+ return nil
+}
+
+// The hash function for Apply is #(left, right, applycache.op).
+
+type applycache struct {
+ data4ncache
+ op int // Current operation during an apply
+}
+
+func (bc *applycache) matchapply(left, right int) int {
+ entry := bc.table[_TRIPLE(left, right, bc.op, len(bc.table))]
+ if entry.a == left && entry.b == right && entry.c == bc.op {
+ if _DEBUG {
+ bc.opHit++
+ }
+ return entry.res
+ }
+ if _DEBUG {
+ bc.opMiss++
+ }
+ return -1
+}
+
+func (bc *applycache) setapply(left, right, res int) int {
+ bc.table[_TRIPLE(left, right, bc.op, len(bc.table))] = data4n{
+ a: left,
+ b: right,
+ c: bc.op,
+ res: res,
+ }
+ return res
+}
+
+// The hash function for operation Not(n) is simply n.
+
+func (bc *applycache) matchnot(n int) int {
+ entry := bc.table[n%len(bc.table)]
+ if entry.a == n && entry.c == int(opnot) {
+ if _DEBUG {
+ bc.opHit++
+ }
+ return entry.res
+ }
+ if _DEBUG {
+ bc.opMiss++
+ }
+ return -1
+}
+
+func (bc *applycache) setnot(n, res int) int {
+ bc.table[n%len(bc.table)] = data4n{
+ a: n,
+ c: int(opnot),
+ res: res,
+ }
+ return res
+}
+
+func (bc applycache) String() string {
+ res := fmt.Sprintf("== Apply cache %d (%s)\n", len(bc.table), humanSize(len(bc.table), unsafe.Sizeof(data4n{})))
+ res += fmt.Sprintf(" Operator Hits: %d (%.1f%%)\n", bc.opHit, (float64(bc.opHit)*100)/(float64(bc.opHit)+float64(bc.opMiss)))
+ res += fmt.Sprintf(" Operator Miss: %d\n", bc.opMiss)
+ return res
+}
+
+// The hash function for ITE is #(f,g,h), so we need to cache 4 node positions
+// per entry.
+
+type itecache struct {
+ data4ncache
+}
+
+func (bc *itecache) matchite(f, g, h int) int {
+ entry := bc.table[_TRIPLE(f, g, h, len(bc.table))]
+ if entry.a == f && entry.b == g && entry.c == h {
+ if _DEBUG {
+ bc.opHit++
+ }
+ return entry.res
+ }
+ if _DEBUG {
+ bc.opMiss++
+ }
+ return -1
+}
+
+func (bc *itecache) setite(f, g, h, res int) int {
+ bc.table[_TRIPLE(f, g, h, len(bc.table))] = data4n{
+ a: f,
+ b: g,
+ c: h,
+ res: res,
+ }
+ return res
+}
+
+func (bc itecache) String() string {
+ res := fmt.Sprintf("== ITE cache %d (%s)\n", len(bc.table), humanSize(len(bc.table), unsafe.Sizeof(data4n{})))
+ res += fmt.Sprintf(" Operator Hits: %d (%.1f%%)\n", bc.opHit, (float64(bc.opHit)*100)/(float64(bc.opHit)+float64(bc.opMiss)))
+ res += fmt.Sprintf(" Operator Miss: %d\n", bc.opMiss)
+ return res
+}
+
+// The hash function for quantification is (n, varset, quantid).
+
+type quantcache struct {
+ data4ncache // Cache for exist/forall results
+ quantset []int32 // Current variable set for quant.
+ quantsetID int32 // Current id used in quantset
+ quantlast int32 // Current last variable to be quant.
+ id int // Current cache id for quantifications
+}
+
+func (bc *quantcache) matchquant(n, varset int) int {
+ entry := bc.table[_PAIR(n, varset, len(bc.table))]
+ if entry.a == n && entry.b == varset && entry.c == bc.id {
+ if _DEBUG {
+ bc.opHit++
+ }
+ return entry.res
+ }
+ if _DEBUG {
+ bc.opMiss++
+ }
+ return -1
+}
+
+func (bc *quantcache) setquant(n, varset, res int) int {
+ bc.table[_PAIR(n, varset, len(bc.table))] = data4n{
+ a: n,
+ b: varset,
+ c: bc.id,
+ res: res,
+ }
+ return res
+}
+
+func (bc quantcache) String() string {
+ res := fmt.Sprintf("== Quant cache %d (%s)\n", len(bc.table), humanSize(len(bc.table), unsafe.Sizeof(data4n{})))
+ res += fmt.Sprintf(" Operator Hits: %d (%.1f%%)\n", bc.opHit, (float64(bc.opHit)*100)/(float64(bc.opHit)+float64(bc.opMiss)))
+ res += fmt.Sprintf(" Operator Miss: %d\n", bc.opMiss)
+ return res
+}
+
+// The hash function for AppEx is #(left, right, varset << 2 | appexcache.op )
+// so we can use the same cache for several operators.
+
+// appexcache are a mix of quant and apply caches
+type appexcache struct {
+ data4ncache // Cache for appex/appall results
+ op int // Current operator for appex
+ id int // Current id
+}
+
+func (bc *appexcache) matchappex(left, right int) int {
+ entry := bc.table[_TRIPLE(left, right, bc.id, len(bc.table))]
+ if entry.a == left && entry.b == right && entry.c == bc.id {
+ if _DEBUG {
+ bc.opHit++
+ }
+ return entry.res
+ }
+ if _DEBUG {
+ bc.opMiss++
+ }
+ return -1
+}
+
+func (bc *appexcache) setappex(left, right, res int) int {
+ bc.table[_TRIPLE(left, right, bc.id, len(bc.table))] = data4n{
+ a: left,
+ b: right,
+ c: bc.id,
+ res: res,
+ }
+ return res
+}
+
+func (bc appexcache) String() string {
+ res := fmt.Sprintf("== AppEx cache %d (%s)\n", len(bc.table), humanSize(len(bc.table), unsafe.Sizeof(data4n{})))
+ res += fmt.Sprintf(" Operator Hits: %d (%.1f%%)\n", bc.opHit, (float64(bc.opHit)*100)/(float64(bc.opHit)+float64(bc.opMiss)))
+ res += fmt.Sprintf(" Operator Miss: %d\n", bc.opMiss)
+ return res
+}
+
+// The hash function for operation Replace(n) is simply n.
+
+type replacecache struct {
+ data3ncache // Cache for replace results
+ id int // Current cache id for replace
+}
+
+func (bc *replacecache) matchreplace(n int) int {
+ entry := bc.table[n%len(bc.table)]
+ if entry.a == n && entry.c == bc.id {
+ if _DEBUG {
+ bc.opHit++
+ }
+ return entry.res
+ }
+ if _DEBUG {
+ bc.opMiss++
+ }
+ return -1
+}
+
+func (bc *replacecache) setreplace(n, res int) int {
+ bc.table[n%len(bc.table)] = data3n{
+ a: n,
+ c: bc.id,
+ res: res,
+ }
+ return res
+}
+
+func (bc replacecache) String() string {
+ res := fmt.Sprintf("== Replace %d (%s)\n", len(bc.table), humanSize(len(bc.table), unsafe.Sizeof(data3n{})))
+ res += fmt.Sprintf(" Operator Hits: %d (%.1f%%)\n", bc.opHit, (float64(bc.opHit)*100)/(float64(bc.opHit)+float64(bc.opMiss)))
+ res += fmt.Sprintf(" Operator Miss: %d\n", bc.opMiss)
+ return res
+}
diff --git a/vendor/github.com/dalzilio/rudd/config.go b/vendor/github.com/dalzilio/rudd/config.go
new file mode 100644
index 0000000..67264fc
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/config.go
@@ -0,0 +1,94 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+package rudd
+
+// configs is used to store the values of different parameters of the BDD
+type configs struct {
+ varnum int // number of BDD variables
+ nodesize int // initial number of nodes in the table
+ cachesize int // initial cache size (general)
+ cacheratio int // initial ratio (general, 0 if size constant) between cache size and node table
+ maxnodesize int // Maximum total number of nodes (0 if no limit)
+ maxnodeincrease int // Maximum number of nodes that can be added to the table at each resize (0 if no limit)
+ minfreenodes int // Minimum number of nodes that should be left after GC before triggering a resize
+}
+
+func makeconfigs(varnum int) *configs {
+ c := &configs{varnum: varnum}
+ c.minfreenodes = _MINFREENODES
+ c.maxnodeincrease = _DEFAULTMAXNODEINC
+ // we build enough nodes to include all the variables in varset
+ c.nodesize = 2*varnum + 2
+ return c
+}
+
+// Nodesize is a configuration option (function). Used as a parameter in New it
+// sets a preferred initial size for the node table. The size of the BDD can
+// increase during computation. By default we create a table large enough to
+// include the two constants and the "variables" used in the call to Ithvar and
+// NIthvar.
+func Nodesize(size int) func(*configs) {
+ return func(c *configs) {
+ if size >= 2*c.varnum+2 {
+ c.nodesize = size
+ }
+ }
+}
+
+// Maxnodesize is a configuration option (function). Used as a parameter in New
+// it sets a limit to the number of nodes in the BDD. An operation trying to
+// raise the number of nodes above this limit will generate an error and return
+// a nil Node. The default value (0) means that there is no limit, in which case
+// allocation can panic if we exhaust all the available memory.
+func Maxnodesize(size int) func(*configs) {
+ return func(c *configs) {
+ c.maxnodesize = size
+ }
+}
+
+// Maxnodeincrease is a configuration option (function). Used as a parameter in
+// New it sets a limit on the increase in size of the node table. Below this
+// limit we typically double the size of the node list each time we need to
+// resize it. The default value is about a million nodes. Set the value to zero
+// to avoid imposing a limit.
+func Maxnodeincrease(size int) func(*configs) {
+ return func(c *configs) {
+ c.maxnodeincrease = size
+ }
+}
+
+// Minfreenodes is a configuration option (function). Used as a parameter in New
+// it sets the ratio of free nodes (%) that has to be left after a Garbage
+// Collection event. When there are not enough free nodes in the BDD, we try
+// reclaiming unused nodes. With a ratio of, say, 25, we resize the table if the
+// number of free nodes is less than 25% of the capacity of the table (see
+// Maxnodesize and Maxnodeincrease). The default value is 20%.
+func Minfreenodes(ratio int) func(*configs) {
+ return func(c *configs) {
+ c.minfreenodes = ratio
+ }
+}
+
+// Cachesize is a configuration option (function). Used as a parameter in New it
+// sets the initial number of entries in the operation caches. The default value
+// is 10 000. Typical values for nodesize are 10 000 nodes for small test
+// examples and up to 1 000 000 nodes for large examples. See also the
+// Cacheratio config.
+func Cachesize(size int) func(*configs) {
+ return func(c *configs) {
+ c.cachesize = size
+ }
+}
+
+// Cacheratio is a configuration option (function). Used as a parameter in New
+// it sets a "cache ratio" (%) so that caches can grow each time we resize the
+// node table. With a cache ratio of r, we have r available entries in the cache
+// for every 100 slots in the node table. (A typical value for the cache ratio
+// is 25% or 20%). The default value (0) means that the cache size never grows.
+func Cacheratio(ratio int) func(*configs) {
+ return func(c *configs) {
+ c.cacheratio = ratio
+ }
+}
diff --git a/vendor/github.com/dalzilio/rudd/debug.go b/vendor/github.com/dalzilio/rudd/debug.go
new file mode 100644
index 0000000..6341c98
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/debug.go
@@ -0,0 +1,19 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+// +build debug
+
+package rudd
+
+import (
+ "log"
+ "os"
+)
+
+const _DEBUG bool = true
+const _LOGLEVEL int = 1
+
+func init() {
+ log.SetOutput(os.Stdout)
+}
diff --git a/vendor/github.com/dalzilio/rudd/doc.go b/vendor/github.com/dalzilio/rudd/doc.go
new file mode 100644
index 0000000..216b6d6
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/doc.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+/*
+Package rudd defines a concrete type for Binary Decision Diagrams (BDD), a data
+structure used to efficiently represent Boolean functions over a fixed set of
+variables or, equivalently, sets of Boolean vectors with a fixed size.
+
+Basics
+
+Each BDD has a fixed number of variables, Varnum, declared when it is
+initialized (using the method New) and each variable is represented by an
+(integer) index in the interval [0..Varnum), called a level. Our library
+supports the creation of multiple BDDs, possibly with different numbers of
+variables.
+
+Most operations over a BDD return a Node; that is, a pointer to a "vertex" in
+the BDD that includes a variable level and the addresses of the low and high
+branches for this node. We use integers to represent the addresses of Nodes,
+with the convention that 1 (respectively 0) is the address of the constant
+function True (respectively False).
+
+Use of build tags
+
+For the most part, data structures and algorithms implemented in this library
+are a direct adaptation of those found in the C-library BuDDy, developed by
+Jorn Lind-Nielsen; we even implemented the same examples as in the BuDDy
+distribution for benchmarks and regression testing. We provide two possible
+implementations for BDD that can be selected using build tags.
+
+Our default implementation (without build tag) uses a standard Go runtime hashmap
+to encode a "unicity table".
+
+When building your executable with the build tag `buddy`, the API will switch to
+an implementation that is very close to the one of the BuDDy library; based on a
+specialized data-structure that mixes a dynamic array with a hash table.
+
+To get access to better statistics about caches and garbage collection, as well
+as to unlock logging of some operations, you can also compile your executable
+with the build tag `debug`.
+
+Automatic memory management
+
+The library is written in pure Go, without the need for CGo or any other
+dependencies. Like with MuDDy, a ML interface to BuDDy, we piggyback on the
+garbage collection mechanism offered by our host language (in our case Go). We
+take care of BDD resizing and memory management directly in the library, but
+"external" references to BDD nodes made by user code are automatically managed
+by the Go runtime. Unlike MuDDy, we do not provide an interface, but a genuine
+reimplementation of BDD in Go. As a consequence, we do not suffer from FFI
+overheads when calling from Go into C.
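+
+Example
+
+The following sketch (not part of the original documentation) illustrates the
+basic workflow: create a BDD with a fixed number of variables, build formulas
+with Ithvar and Apply, then query the result. Error handling is elided.
+
+	bdd, _ := rudd.New(4)
+	x0, x1 := bdd.Ithvar(0), bdd.Ithvar(1)
+	f := bdd.Apply(x0, x1, rudd.OPor)
+	fmt.Println(bdd.Satcount(f)) // prints 12, since 12 of the 2^4 assignments satisfy x0 | x1
+	bdd.Print(os.Stdout, f)      // tabular description of the nodes reachable from f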
+*/
+package rudd
diff --git a/vendor/github.com/dalzilio/rudd/errors.go b/vendor/github.com/dalzilio/rudd/errors.go
new file mode 100644
index 0000000..7db6976
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/errors.go
@@ -0,0 +1,36 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+package rudd
+
+import (
+ "fmt"
+ "log"
+)
+
+// Error returns the error status of the BDD.
+func (b *BDD) Error() string {
+ if b.error == nil {
+ return ""
+ }
+ return b.error.Error()
+}
+
+// Errored returns true if there was an error during a computation.
+func (b *BDD) Errored() bool {
+ return b.error != nil
+}
+
+func (b *BDD) seterror(format string, a ...interface{}) Node {
+ if b.error != nil {
+ format = format + "; " + b.Error()
+ b.error = fmt.Errorf(format, a...)
+ return nil
+ }
+ b.error = fmt.Errorf(format, a...)
+ if _DEBUG {
+ log.Println(b.error)
+ }
+ return nil
+}
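+
+// Illustrative sketch (not in the original file): operations on a BDD do not
+// return a separate error value; instead the first failure is recorded and can
+// be inspected afterwards. With hypothetical nodes n1 and n2, a typical usage
+// pattern is:
+//
+//	res := bdd.Apply(n1, n2, rudd.OPand)
+//	if bdd.Errored() {
+//		log.Fatalf("BDD computation failed: %s", bdd.Error())
+//	}
+//	_ = res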
diff --git a/vendor/github.com/dalzilio/rudd/hkernel.go b/vendor/github.com/dalzilio/rudd/hkernel.go
new file mode 100644
index 0000000..df30547
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/hkernel.go
@@ -0,0 +1,216 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+// +build !buddy
+
+package rudd
+
+import (
+ "log"
+ "math"
+ "runtime"
+ "sync/atomic"
+)
+
+func (b *tables) retnode(n int) Node {
+	if n < 0 || n >= len(b.nodes) {
+ if _DEBUG {
+ log.Panicf("b.retnode(%d) not valid\n", n)
+ }
+ return nil
+ }
+ if n == 0 {
+ return bddzero
+ }
+ if n == 1 {
+ return bddone
+ }
+ x := n
+ if b.nodes[n].refcou < _MAXREFCOUNT {
+ b.nodes[n].refcou++
+ runtime.SetFinalizer(&x, b.nodefinalizer)
+ if _DEBUG {
+ atomic.AddUint64(&(b.setfinalizers), 1)
+ if _LOGLEVEL > 2 {
+ log.Printf("inc refcou %d\n", n)
+ }
+ }
+ }
+ return &x
+}
+
+func (b *tables) makenode(level int32, low int, high int, refstack []int) (int, error) {
+ if _DEBUG {
+ b.uniqueAccess++
+ }
+ // check whether children are equal, in which case we can skip the node
+ if low == high {
+ return low, nil
+ }
+ // otherwise try to find an existing node using the unique table
+ if res, ok := b.nodehash(level, low, high); ok {
+ if _DEBUG {
+ b.uniqueHit++
+ }
+ return res, nil
+ }
+ if _DEBUG {
+ b.uniqueMiss++
+ }
+ // If no existing node, we build one. If there is no available spot
+ // (b.freepos == 0), we try garbage collection and, as a last resort,
+ // resizing the BDD list.
+ var err error
+ if b.freepos == 0 {
+ // We garbage collect unused nodes to try and find spare space.
+ b.gbc(refstack)
+ err = errReset
+		// We also test if we are under the threshold for resizing.
+ if (b.freenum*100)/len(b.nodes) <= b.minfreenodes {
+ err = b.noderesize()
+ if err != errResize {
+ return -1, errMemory
+ }
+ }
+ // Panic if we still have no free positions after all this
+ if b.freepos == 0 {
+ // b.seterror("Unable to resize BDD")
+ return -1, errMemory
+ }
+ }
+ // We can now build the new node in the first available spot
+ b.produced++
+ return b.setnode(level, low, high, 0), err
+}
+
+func (b *tables) gbc(refstack []int) {
+ if _LOGLEVEL > 0 {
+ log.Println("starting GC")
+ }
+
+ // runtime.GC()
+
+ // we append the current stats to the GC history
+ if _DEBUG {
+ b.gcstat.history = append(b.gcstat.history, gcpoint{
+ nodes: len(b.nodes),
+ freenodes: b.freenum,
+ setfinalizers: int(b.gcstat.setfinalizers),
+ calledfinalizers: int(b.gcstat.calledfinalizers),
+ })
+ if _LOGLEVEL > 0 {
+ log.Printf("runtime.GC() reclaimed %d references\n", b.gcstat.calledfinalizers)
+ }
+ b.gcstat.setfinalizers = 0
+ b.gcstat.calledfinalizers = 0
+ } else {
+ b.gcstat.history = append(b.gcstat.history, gcpoint{
+ nodes: len(b.nodes),
+ freenodes: b.freenum,
+ })
+ }
+ // we mark the nodes in the refstack to avoid collecting them
+ for _, r := range refstack {
+ b.markrec(int(r))
+ }
+ // we also protect nodes with a positive refcount (and therefore also the
+	// ones with a MAXREFCOUNT, such as variables)
+ for k := range b.nodes {
+ if b.nodes[k].refcou > 0 {
+ b.markrec(k)
+ }
+ }
+ b.freepos = 0
+ b.freenum = 0
+	// we do a pass through the nodes list to reclaim the unmarked nodes. After
+ // finishing this pass, b.freepos points to the first free position in
+ // b.nodes, or it is 0 if we found none.
+ for n := len(b.nodes) - 1; n > 1; n-- {
+ if b.ismarked(n) && (b.nodes[n].low != -1) {
+ b.unmarknode(n)
+ } else {
+ b.delnode(b.nodes[n])
+ b.nodes[n].low = -1
+ b.nodes[n].high = b.freepos
+ b.freepos = n
+ b.freenum++
+ }
+ }
+ // we also invalidate the caches
+ // b.cachereset()
+ if _LOGLEVEL > 0 {
+ log.Printf("end GC; freenum: %d\n", b.freenum)
+ }
+}
+
+func (b *tables) noderesize() error {
+ if _LOGLEVEL > 0 {
+ log.Printf("start resize: %d\n", len(b.nodes))
+ }
+ // if b.error != nil {
+ // b.seterror("Error before resizing; %s", b.error)
+ // return b.error
+ // }
+ oldsize := len(b.nodes)
+ nodesize := len(b.nodes)
+ if (oldsize >= b.maxnodesize) && (b.maxnodesize > 0) {
+ // b.seterror("Cannot resize BDD, already at max capacity (%d nodes)", b.maxnodesize)
+ return errMemory
+ }
+ if oldsize > (math.MaxInt32 >> 1) {
+ nodesize = math.MaxInt32 - 1
+ } else {
+ nodesize = nodesize << 1
+ }
+ if b.maxnodeincrease > 0 && nodesize > (oldsize+b.maxnodeincrease) {
+ nodesize = oldsize + b.maxnodeincrease
+ }
+ if (nodesize > b.maxnodesize) && (b.maxnodesize > 0) {
+ nodesize = b.maxnodesize
+ }
+ if nodesize <= oldsize {
+ // b.seterror("Unable to grow size of BDD (%d nodes)", nodesize)
+ return errMemory
+ }
+
+ tmp := b.nodes
+ b.nodes = make([]huddnode, nodesize)
+ copy(b.nodes, tmp)
+
+ for n := oldsize; n < nodesize; n++ {
+ b.nodes[n].refcou = 0
+ b.nodes[n].level = 0
+ b.nodes[n].low = -1
+ b.nodes[n].high = n + 1
+ }
+ b.nodes[nodesize-1].high = b.freepos
+ b.freepos = oldsize
+ b.freenum += (nodesize - oldsize)
+
+ // b.cacheresize(len(b.nodes))
+
+ if _LOGLEVEL > 0 {
+ log.Printf("end resize: %d\n", len(b.nodes))
+ }
+
+ return errResize
+}
+
+func (b *tables) markrec(n int) {
+ if n < 2 || b.ismarked(n) || (b.nodes[n].low == -1) {
+ return
+ }
+ b.marknode(n)
+ b.markrec(b.nodes[n].low)
+ b.markrec(b.nodes[n].high)
+}
+
+func (b *tables) unmarkall() {
+ for k, v := range b.nodes {
+ if k < 2 || !b.ismarked(k) || (v.low == -1) {
+ continue
+ }
+ b.unmarknode(k)
+ }
+}
diff --git a/vendor/github.com/dalzilio/rudd/hudd.go b/vendor/github.com/dalzilio/rudd/hudd.go
new file mode 100644
index 0000000..cec631d
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/hudd.go
@@ -0,0 +1,331 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+//go:build !buddy
+// +build !buddy
+
+package rudd
+
+import (
+ "fmt"
+ "log"
+ "sync"
+ "sync/atomic"
+ "unsafe"
+)
+
+// tables corresponds to Binary Decision Diagrams based on the runtime
+// hashmap. We hash a triplet (level, low, high) to a []byte and use the unique
+// table to associate this triplet to an entry in the nodes table. We use more
+// space but a benefit is that we can easily migrate to a concurrency-safe
+// hashmap if we want to test concurrent data structures.
+type tables struct {
+ sync.RWMutex
+ nodes []huddnode // List of all the BDD nodes. Constants are always kept at index 0 and 1
+ unique map[[huddsize]byte]int // Unicity table, used to associate each triplet to a single node
+ freenum int // Number of free nodes
+ freepos int // First free node
+ produced int // Total number of new nodes ever produced
+ hbuff [huddsize]byte // Used to compute the hash of nodes. A Buffer needs no initialization.
+ nodefinalizer interface{} // Finalizer used to decrement the ref count of external references
+ uniqueAccess int // accesses to the unique node table
+	uniqueHit        int                     // entries actually found in the unique node table
+	uniqueMiss       int                     // entries not found in the unique node table
+ gcstat // Information about garbage collections
+ configs // Configurable parameters
+}
+
+type huddnode struct {
+ level int32 // Order of the variable in the BDD
+ low int // Reference to the false branch
+ high int // Reference to the true branch
+ refcou int32 // Count the number of external references
+}
+
+func (b *tables) ismarked(n int) bool {
+ b.RLock()
+ defer b.RUnlock()
+ return (b.nodes[n].refcou & 0x200000) != 0
+}
+
+func (b *tables) marknode(n int) {
+ b.RLock()
+ defer b.RUnlock()
+ b.nodes[n].refcou |= 0x200000
+}
+
+func (b *tables) unmarknode(n int) {
+ b.RLock()
+ defer b.RUnlock()
+ b.nodes[n].refcou &= 0x1FFFFF
+}
+
+// New returns a new BDD based on an implementation selected with the build tag;
+// meaning the 'Hudd'-style BDD by default (based on the standard runtime
+// hashmap) or a 'BuDDy'-style BDD if the build tag buddy is set. Parameter
+// varnum is the number of variables in the BDD.
+//
+// It is possible to set optional (configuration) parameters, such as the size
+// of the initial node table (Nodesize) or the size for caches (Cachesize),
+// using configs functions. The initial number of nodes is not critical since
+// the table will be resized whenever there are too few nodes left after a
+// garbage collection. But it does have some impact on the efficiency of the
+// operations. We return a nil value if there is an error while creating the
+// BDD.
+func New(varnum int, options ...func(*configs)) (*BDD, error) {
+ b := &BDD{}
+ if (varnum < 1) || (varnum > int(_MAXVAR)) {
+ b.seterror("bad number of variable (%d)", varnum)
+ return nil, b.error
+ }
+ config := makeconfigs(varnum)
+ for _, f := range options {
+ f(config)
+ }
+ b.varnum = int32(varnum)
+ if _LOGLEVEL > 0 {
+ log.Printf("set varnum to %d\n", b.varnum)
+ }
+ b.varset = make([][2]int, varnum)
+ // We also initialize the refstack.
+ b.refstack = make([]int, 0, 2*varnum+4)
+ b.initref()
+ b.error = nil
+ impl := &tables{}
+ impl.minfreenodes = config.minfreenodes
+ impl.maxnodeincrease = config.maxnodeincrease
+ // initializing the list of nodes
+ nodesize := config.nodesize
+ impl.nodes = make([]huddnode, nodesize)
+ for k := range impl.nodes {
+ impl.nodes[k] = huddnode{
+ level: 0,
+ low: -1,
+ high: k + 1,
+ refcou: 0,
+ }
+ }
+ impl.nodes[nodesize-1].high = 0
+ impl.unique = make(map[[huddsize]byte]int, nodesize)
+ // creating bddzero and bddone. We do not add them to the unique table.
+ impl.nodes[0] = huddnode{
+ level: int32(config.varnum),
+ low: 0,
+ high: 0,
+ refcou: _MAXREFCOUNT,
+ }
+ impl.nodes[1] = huddnode{
+ level: int32(config.varnum),
+ low: 1,
+ high: 1,
+ refcou: _MAXREFCOUNT,
+ }
+ impl.freepos = 2
+ impl.freenum = len(impl.nodes) - 2
+ for k := 0; k < config.varnum; k++ {
+ v0, _ := impl.makenode(int32(k), 0, 1, nil)
+ if v0 < 0 {
+ b.seterror("cannot allocate new variable %d in setVarnum", k)
+ return nil, b.error
+ }
+ impl.nodes[v0].refcou = _MAXREFCOUNT
+ b.pushref(v0)
+ v1, _ := impl.makenode(int32(k), 1, 0, nil)
+ if v1 < 0 {
+ b.seterror("cannot allocate new variable %d in setVarnum", k)
+ return nil, b.error
+ }
+ impl.nodes[v1].refcou = _MAXREFCOUNT
+ b.popref(1)
+ b.varset[k] = [2]int{v0, v1}
+ }
+ impl.gcstat.history = []gcpoint{}
+ impl.nodefinalizer = func(n *int) {
+ b.Lock()
+ defer b.Unlock()
+ if _DEBUG {
+ atomic.AddUint64(&(impl.gcstat.calledfinalizers), 1)
+ if _LOGLEVEL > 2 {
+ log.Printf("dec refcou %d\n", *n)
+ }
+ }
+ impl.nodes[*n].refcou--
+ }
+ b.tables = impl
+ b.cacheinit(config)
+ return b, nil
+}
+
+func (b *tables) huddhash(level int32, low, high int) {
+ b.hbuff[0] = byte(level)
+ b.hbuff[1] = byte(level >> 8)
+ b.hbuff[2] = byte(level >> 16)
+ b.hbuff[3] = byte(level >> 24)
+ b.hbuff[4] = byte(low)
+ b.hbuff[5] = byte(low >> 8)
+ b.hbuff[6] = byte(low >> 16)
+ b.hbuff[7] = byte(low >> 24)
+ if huddsize == 20 {
+ // 64 bits machine
+ b.hbuff[8] = byte(low >> 32)
+ b.hbuff[9] = byte(low >> 40)
+ b.hbuff[10] = byte(low >> 48)
+ b.hbuff[11] = byte(low >> 56)
+ b.hbuff[12] = byte(high)
+ b.hbuff[13] = byte(high >> 8)
+ b.hbuff[14] = byte(high >> 16)
+ b.hbuff[15] = byte(high >> 24)
+ b.hbuff[16] = byte(high >> 32)
+ b.hbuff[17] = byte(high >> 40)
+ b.hbuff[18] = byte(high >> 48)
+ b.hbuff[19] = byte(high >> 56)
+ return
+ }
+ // 32 bits machine
+ b.hbuff[8] = byte(high)
+ b.hbuff[9] = byte(high >> 8)
+ b.hbuff[10] = byte(high >> 16)
+ b.hbuff[11] = byte(high >> 24)
+}
+
+func (b *tables) nodehash(level int32, low, high int) (int, bool) {
+ b.huddhash(level, low, high)
+ hn, ok := b.unique[b.hbuff]
+ return hn, ok
+}
+
+// When a slot is unused in b.nodes, we have low set to -1 and high set to the
+// next free position. The value of b.freepos gives the index of the lowest
+// unused slot, except when freenum is 0, in which case it is also 0.
+
+func (b *tables) setnode(level int32, low int, high int, count int32) int {
+ b.Lock()
+ defer b.Unlock()
+ b.huddhash(level, low, high)
+ b.freenum--
+ b.unique[b.hbuff] = b.freepos
+ res := b.freepos
+ b.freepos = b.nodes[b.freepos].high
+ b.nodes[res] = huddnode{level, low, high, count}
+ return res
+}
+
+func (b *tables) delnode(hn huddnode) {
+ b.huddhash(hn.level, hn.low, hn.high)
+ delete(b.unique, b.hbuff)
+}
+
+func (b *tables) size() int {
+ b.RLock()
+ defer b.RUnlock()
+ return len(b.nodes)
+}
+
+func (b *tables) level(n int) int32 {
+ b.RLock()
+ defer b.RUnlock()
+ return b.nodes[n].level
+}
+
+func (b *tables) low(n int) int {
+ b.RLock()
+ defer b.RUnlock()
+ return b.nodes[n].low
+}
+
+func (b *tables) high(n int) int {
+ b.RLock()
+ defer b.RUnlock()
+ return b.nodes[n].high
+}
+
+func (b *tables) allnodesfrom(f func(id, level, low, high int) error, n []Node) error {
+ for _, v := range n {
+ b.markrec(*v)
+ }
+ // if err := f(0, int(b.nodes[0].level), 0, 0); err != nil {
+ // b.unmarkall()
+ // return err
+ // }
+ // if err := f(1, int(b.nodes[1].level), 1, 1); err != nil {
+ // b.unmarkall()
+ // return err
+ // }
+ b.RLock()
+ count := len(b.nodes)
+ b.RUnlock()
+
+ for k := 0; k < count; k++ {
+ b.RLock()
+		if k >= len(b.nodes) {
+			b.RUnlock()
+			break
+		}
+ b.RUnlock()
+ if b.ismarked(k) {
+ b.unmarknode(k)
+ if err := f(k, int(b.nodes[k].level), b.nodes[k].low, b.nodes[k].high); err != nil {
+ b.unmarkall()
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (b *tables) allnodes(f func(id, level, low, high int) error) error {
+ // if err := f(0, int(b.nodes[0].level), 0, 0); err != nil {
+ // return err
+ // }
+ // if err := f(1, int(b.nodes[1].level), 1, 1); err != nil {
+ // return err
+ // }
+ b.RLock()
+ count := len(b.nodes)
+ b.RUnlock()
+
+ for k := 0; k < count; k++ {
+ b.RLock()
+		if k >= len(b.nodes) {
+			b.RUnlock()
+			break
+		}
+ v := b.nodes[k]
+ b.RUnlock()
+ if v.low != -1 {
+ if err := f(k, int(v.level), v.low, v.high); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// stats returns information about the implementation
+func (b *tables) stats() string {
+ b.RLock()
+ defer b.RUnlock()
+ res := "Impl.: Hudd\n"
+ res += fmt.Sprintf("Allocated: %d (%s)\n", len(b.nodes), humanSize(len(b.nodes), unsafe.Sizeof(huddnode{})))
+ res += fmt.Sprintf("Produced: %d\n", b.produced)
+ r := (float64(b.freenum) / float64(len(b.nodes))) * 100
+ res += fmt.Sprintf("Free: %d (%.3g %%)\n", b.freenum, r)
+ res += fmt.Sprintf("Used: %d (%.3g %%)\n", len(b.nodes)-b.freenum, (100.0 - r))
+ res += "==============\n"
+ res += fmt.Sprintf("# of GC: %d\n", len(b.gcstat.history))
+ if _DEBUG {
+ allocated := int(b.gcstat.setfinalizers)
+ reclaimed := int(b.gcstat.calledfinalizers)
+ for _, g := range b.gcstat.history {
+ allocated += g.setfinalizers
+ reclaimed += g.calledfinalizers
+ }
+ res += fmt.Sprintf("Ext. refs: %d\n", allocated)
+ res += fmt.Sprintf("Reclaimed: %d\n", reclaimed)
+ res += "==============\n"
+ res += fmt.Sprintf("Unique Access: %d\n", b.uniqueAccess)
+ res += fmt.Sprintf("Unique Hit: %d (%.1f%% + %.1f%%)\n", b.uniqueHit, (float64(b.uniqueHit)*100)/float64(b.uniqueAccess),
+ (float64(b.uniqueAccess-b.uniqueMiss-b.uniqueHit)*100)/float64(b.uniqueAccess))
+ res += fmt.Sprintf("Unique Miss: %d\n", b.uniqueMiss)
+ }
+ return res
+}
diff --git a/vendor/github.com/dalzilio/rudd/kernel.go b/vendor/github.com/dalzilio/rudd/kernel.go
new file mode 100644
index 0000000..214df37
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/kernel.go
@@ -0,0 +1,36 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+package rudd
+
+import (
+ "errors"
+)
+
+// huddsize is the number of bytes needed to encode a node triplet (level, low,
+// high); adapted from uintSize in the math/bits package.
+const huddsize = (2*(32<<(^uint(0)>>32&1)) + 32) / 8 // 12 (32 bits) or 20 (64 bits)
+
+// _MINFREENODES is the minimal ratio of free nodes (%) that has to be left
+// after a garbage collection; below this threshold the node table is resized.
+const _MINFREENODES int = 20
+
+// _MAXVAR is the maximal number of levels in the BDD. We use only the first 21
+// bits for encoding levels (so also the max number of variables). We use 11
+// other bits for markings. Hence we make sure to always use int32 to avoid
+// problems when we change architecture.
+const _MAXVAR int32 = 0x1FFFFF
+
+// _MAXREFCOUNT is the maximal value of the reference counter (refcou), also
+// used to stick nodes (like constants and variables) in the node list. It is
+// egal to 1023 (10 bits).
+const _MAXREFCOUNT int32 = 0x3FF
+
+// _DEFAULTMAXNODEINC is the default value for the maximal increase in the
+// number of nodes during a resize. It is approx. one million nodes (1 048 576)
+// (could be interesting to change it to 1 << 23 = 8 388 608).
+const _DEFAULTMAXNODEINC int = 1 << 20
+
+var errMemory = errors.New("unable to free memory or resize BDD")
+var errResize = errors.New("should cache resize") // when gbc and then noderesize
+var errReset = errors.New("should cache reset") // when gbc only, without resizing
diff --git a/vendor/github.com/dalzilio/rudd/nodebug.go b/vendor/github.com/dalzilio/rudd/nodebug.go
new file mode 100644
index 0000000..a7b1f01
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/nodebug.go
@@ -0,0 +1,10 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+// +build !debug
+
+package rudd
+
+const _DEBUG bool = false
+const _LOGLEVEL int = 0
diff --git a/vendor/github.com/dalzilio/rudd/operations.go b/vendor/github.com/dalzilio/rudd/operations.go
new file mode 100644
index 0000000..0455572
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/operations.go
@@ -0,0 +1,713 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+package rudd
+
+import (
+ "fmt"
+ "log"
+ "math/big"
+)
+
+// Scanset returns the set of variables (levels) found when following the high
+// branch of node n. This is the dual of function Makeset. The result may be nil
+// if there is an error; otherwise it is sorted following the natural order
+// between levels.
+func (b *BDD) Scanset(n Node) []int {
+ if b.checkptr(n) != nil {
+ return nil
+ }
+ if *n < 2 {
+ return nil
+ }
+ res := []int{}
+ for i := *n; i > 1; i = b.high(i) {
+ res = append(res, int(b.level(i)))
+ }
+ return res
+}
+
+// Makeset returns a node corresponding to the conjunction (the cube) of all the
+// variables in varset, in their positive form. It is such that
+// scanset(Makeset(a)) == a. It returns False and sets the error condition in b
+// if one of the variables is outside the scope of the BDD (see documentation
+// for function *Ithvar*).
+func (b *BDD) Makeset(varset []int) Node {
+ res := bddone
+ for _, level := range varset {
+ // FIXME: should find a way to do it without adding references
+ tmp := b.Apply(res, b.Ithvar(level), OPand)
+ if b.error != nil {
+ return bddzero
+ }
+ res = tmp
+ }
+ return res
+}
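+
+// Illustrative sketch (not in the original file): Makeset and Scanset are dual
+// operations, so for a BDD b with at least 5 variables one would expect:
+//
+//	varset := b.Makeset([]int{1, 3, 4})
+//	fmt.Println(b.Scanset(varset)) // prints [1 3 4]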
+
+// Not returns the negation of the expression corresponding to node n; it
+// computes the result of !n. We negate a BDD by exchanging all references to
+// the zero-terminal with references to the one-terminal and vice versa.
+func (b *BDD) Not(n Node) Node {
+ if b.checkptr(n) != nil {
+ return b.seterror("Wrong operand in call to Not (%d)", *n)
+ }
+ b.initref()
+ b.pushref(*n)
+ res := b.not(*n)
+ b.popref(1)
+ return b.retnode(res)
+}
+
+func (b *BDD) not(n int) int {
+ if n == 0 {
+ return 1
+ }
+ if n == 1 {
+ return 0
+ }
+ // The hash for a not operation is simply n
+ if res := b.matchnot(n); res >= 0 {
+ return res
+ }
+ low := b.pushref(b.not(b.low(n)))
+ high := b.pushref(b.not(b.high(n)))
+ res := b.makenode(b.level(n), low, high)
+ b.popref(2)
+ return b.setnot(n, res)
+}
+
+// Apply performs all of the basic bdd operations with two operands, such as
+// AND, OR etc. Operator opr must be one of the following:
+//
+// Identifier Description Truth table
+//
+// OPand logical and [0,0,0,1]
+// OPxor logical xor [0,1,1,0]
+// OPor logical or [0,1,1,1]
+// OPnand logical not-and [1,1,1,0]
+// OPnor logical not-or [1,0,0,0]
+// OPimp implication [1,1,0,1]
+// OPbiimp equivalence [1,0,0,1]
+// OPdiff set difference [0,0,1,0]
+// OPless less than [0,1,0,0]
+// OPinvimp reverse implication [1,0,1,1]
+func (b *BDD) Apply(n1, n2 Node, op Operator) Node {
+ if b.checkptr(n1) != nil {
+ return b.seterror("Wrong operand in call to Apply %s(n1: %d, n2: ...)", op, *n1)
+ }
+ if b.checkptr(n2) != nil {
+ return b.seterror("Wrong operand in call to Apply %s(n1: ..., n2: %d)", op, *n2)
+ }
+ b.applycache.op = int(op)
+ b.initref()
+ b.pushref(*n1)
+ b.pushref(*n2)
+ res := b.apply(*n1, *n2)
+ b.popref(2)
+ return b.retnode(res)
+}
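+
+// Illustrative sketch (not in the original file): building the function
+// (x0 AND x1) OR NOT x2 with Apply and Not, assuming a BDD b with at least
+// three variables:
+//
+//	f := b.Apply(b.Ithvar(0), b.Ithvar(1), OPand)
+//	g := b.Apply(f, b.Not(b.Ithvar(2)), OPor)
+//	_ = g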
+
+func (b *BDD) apply(left int, right int) int {
+ switch Operator(b.applycache.op) {
+ case OPand:
+ if left == right {
+ return left
+ }
+ if (left == 0) || (right == 0) {
+ return 0
+ }
+ if left == 1 {
+ return right
+ }
+ if right == 1 {
+ return left
+ }
+ case OPor:
+ if left == right {
+ return left
+ }
+ if (left == 1) || (right == 1) {
+ return 1
+ }
+ if left == 0 {
+ return right
+ }
+ if right == 0 {
+ return left
+ }
+ case OPxor:
+ if left == right {
+ return 0
+ }
+ if left == 0 {
+ return right
+ }
+ if right == 0 {
+ return left
+ }
+ case OPnand:
+ if (left == 0) || (right == 0) {
+ return 1
+ }
+ case OPnor:
+ if (left == 1) || (right == 1) {
+ return 0
+ }
+ case OPimp:
+ if left == 0 {
+ return 1
+ }
+ if left == 1 {
+ return right
+ }
+ if right == 1 {
+ return 1
+ }
+ if left == right {
+ return 1
+ }
+ case OPbiimp:
+ if left == right {
+ return 1
+ }
+ if left == 1 {
+ return right
+ }
+ if right == 1 {
+ return left
+ }
+ case OPdiff:
+ if left == right {
+ return 0
+ }
+ if right == 1 {
+ return 0
+ }
+ if left == 0 {
+ return right
+ }
+ case OPless:
+ if (left == right) || (left == 1) {
+ return 0
+ }
+ if left == 0 {
+ return right
+ }
+ case OPinvimp:
+ if right == 0 {
+ return 1
+ }
+ if right == 1 {
+ return left
+ }
+ if left == 1 {
+ return 1
+ }
+ if left == right {
+ return 1
+ }
+ default:
+ // unary operations, OPnot and OPsimplify, should not be used in apply
+ b.seterror("Unauthorized operation (%s) in apply", Operator(b.applycache.op))
+ return -1
+ }
+
+ // we check for errors
+ if left < 0 || right < 0 {
+ if _DEBUG {
+ log.Panicf("panic in apply(%d,%d,%s)\n", left, right, Operator(b.applycache.op))
+ }
+ return -1
+ }
+
+ // we deal with the other cases where the two operands are constants
+ if (left < 2) && (right < 2) {
+ return opres[b.applycache.op][left][right]
+ }
+ if res := b.matchapply(left, right); res >= 0 {
+ return res
+ }
+ leftlvl := b.level(left)
+ rightlvl := b.level(right)
+ var res int
+ if leftlvl == rightlvl {
+ low := b.pushref(b.apply(b.low(left), b.low(right)))
+ high := b.pushref(b.apply(b.high(left), b.high(right)))
+ res = b.makenode(leftlvl, low, high)
+ } else {
+ if leftlvl < rightlvl {
+ low := b.pushref(b.apply(b.low(left), right))
+ high := b.pushref(b.apply(b.high(left), right))
+ res = b.makenode(leftlvl, low, high)
+ } else {
+ low := b.pushref(b.apply(left, b.low(right)))
+ high := b.pushref(b.apply(left, b.high(right)))
+ res = b.makenode(rightlvl, low, high)
+ }
+ }
+ b.popref(2)
+ return b.setapply(left, right, res)
+}
+
+// Ite (short for if-then-else operator) computes the BDD for the expression [(f
+// & g) | (!f & h)] more efficiently than doing the three operations separately.
+func (b *BDD) Ite(f, g, h Node) Node {
+ if b.checkptr(f) != nil {
+ return b.seterror("Wrong operand in call to Ite (f: %d)", *f)
+ }
+ if b.checkptr(g) != nil {
+ return b.seterror("Wrong operand in call to Ite (g: %d)", *g)
+ }
+ if b.checkptr(h) != nil {
+ return b.seterror("Wrong operand in call to Ite (h: %d)", *h)
+ }
+ b.initref()
+ b.pushref(*f)
+ b.pushref(*g)
+ b.pushref(*h)
+ res := b.ite(*f, *g, *h)
+ b.popref(3)
+ return b.retnode(res)
+}
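+
+// Illustrative sketch (not in the original file): Ite(f, g, h) computes the
+// same function as the combination of Apply and Not below, but in a single
+// bottom-up pass. Since BDDs are canonical, both results are the same node:
+//
+//	x0, x1, x2 := b.Ithvar(0), b.Ithvar(1), b.Ithvar(2)
+//	direct := b.Ite(x0, x1, x2)
+//	expanded := b.Apply(b.Apply(x0, x1, OPand), b.Apply(b.Not(x0), x2, OPand), OPor)
+//	// *direct == *expanded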
+
+// iteLow returns n unchanged if level p is strictly higher than level q or r,
+// otherwise it returns the low branch of n. This is used in function ite to
+// know which nodes to descend into: we always follow the node(s) with the
+// smallest level. Function iteHigh does the same with the high branch.
+func (b *BDD) iteLow(p, q, r int32, n int) int {
+ if (p > q) || (p > r) {
+ return n
+ }
+ return b.low(n)
+}
+
+func (b *BDD) iteHigh(p, q, r int32, n int) int {
+ if (p > q) || (p > r) {
+ return n
+ }
+ return b.high(n)
+}
+
+// min3 returns the smallest value between p, q and r. This is used in function
+// ite to compute the smallest level.
+func min3(p, q, r int32) int32 {
+ if p <= q {
+ if p <= r { // p <= q && p <= r
+ return p
+ }
+ return r // r < p <= q
+ }
+ if q <= r { // q < p && q <= r
+ return q
+ }
+ return r // r < q < p
+}
+
+func (b *BDD) ite(f, g, h int) int {
+ switch {
+ case f == 1:
+ return g
+ case f == 0:
+ return h
+ case g == h:
+ return g
+ case (g == 1) && (h == 0):
+ return f
+ case (g == 0) && (h == 1):
+ return b.not(f)
+ }
+ // we check for possible errors
+ if f < 0 || g < 0 || h < 0 {
+ b.seterror("unexpected error in ite")
+ if _DEBUG {
+ log.Panicf("panic in ite(%d,%d,%d)\n", f, g, h)
+ }
+ return -1
+ }
+ if res := b.matchite(f, g, h); res >= 0 {
+ return res
+ }
+ p := b.level(f)
+ q := b.level(g)
+ r := b.level(h)
+ low := b.pushref(b.ite(b.iteLow(p, q, r, f), b.iteLow(q, p, r, g), b.iteLow(r, p, q, h)))
+ high := b.pushref(b.ite(b.iteHigh(p, q, r, f), b.iteHigh(q, p, r, g), b.iteHigh(r, p, q, h)))
+ res := b.makenode(min3(p, q, r), low, high)
+ b.popref(2)
+ return b.setite(f, g, h, res)
+}
+
+// Exist returns the existential quantification of n for the variables in
+// varset, where varset is a node built with a method such as Makeset. We return
+// nil and set the error flag in b if there is an error.
+func (b *BDD) Exist(n, varset Node) Node {
+ if b.checkptr(n) != nil {
+ return b.seterror("Wrong node in call to Exist (n: %d)", *n)
+ }
+ if b.checkptr(varset) != nil {
+ return b.seterror("Wrong varset in call to Exist (%d)", *varset)
+ }
+ if err := b.quantset2cache(*varset); err != nil {
+ return nil
+ }
+ if *varset < 2 { // we have an empty set or a constant
+ return n
+ }
+
+ b.quantcache.id = cacheidEXIST
+ b.applycache.op = int(OPor)
+ b.initref()
+ b.pushref(*n)
+ b.pushref(*varset)
+ res := b.quant(*n, *varset)
+ b.popref(2)
+ return b.retnode(res)
+}
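+
+// Illustrative sketch (not in the original file): existentially quantifying x1
+// out of (x0 AND x1) leaves x0, since the conjunction is satisfiable for some
+// value of x1 exactly when x0 is true:
+//
+//	f := b.Apply(b.Ithvar(0), b.Ithvar(1), OPand)
+//	g := b.Exist(f, b.Makeset([]int{1}))
+//	// g denotes the same function as b.Ithvar(0)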
+
+func (b *BDD) quant(n, varset int) int {
+ if (n < 2) || (b.level(n) > b.quantlast) {
+ return n
+ }
+ // the hash for a quantification operation is simply n
+ if res := b.matchquant(n, varset); res >= 0 {
+ return res
+ }
+ low := b.pushref(b.quant(b.low(n), varset))
+ high := b.pushref(b.quant(b.high(n), varset))
+ var res int
+ if b.quantset[b.level(n)] == b.quantsetID {
+ res = b.apply(low, high)
+ } else {
+ res = b.makenode(b.level(n), low, high)
+ }
+ b.popref(2)
+ return b.setquant(n, varset, res)
+}
+
+// AppEx applies the binary operator *op* on the two operands, n1 and n2, then
+// performs an existential quantification over the variables in varset; meaning
+// it computes the value of (∃ varset . n1 op n2). This is done in a bottom up
+// manner such that both the apply and the quantification are done on the lower
+// before stepping up to the higher nodes. This makes AppEx much more efficient
+// than an apply operation followed by a quantification. Note that, when *op* is
+// a conjunction, this operation returns the relational product of two BDDs.
+func (b *BDD) AppEx(n1, n2 Node, op Operator, varset Node) Node {
+ // FIXME: should check that op is a binary operation
+ if int(op) > 3 {
+ return b.seterror("operator %s not supported in call to AppEx")
+ }
+ if b.checkptr(varset) != nil {
+ return b.seterror("wrong varset in call to AppEx (%d)", *varset)
+ }
+ if *varset < 2 { // we have an empty set
+ return b.Apply(n1, n2, op)
+ }
+ if b.checkptr(n1) != nil {
+ return b.seterror("wrong operand in call to AppEx %s(left: %d)", op, *n1)
+ }
+ if b.checkptr(n2) != nil {
+ return b.seterror("wrong operand in call to AppEx %s(right: %d)", op, *n2)
+ }
+ if err := b.quantset2cache(*varset); err != nil {
+ return nil
+ }
+
+ b.applycache.op = int(OPor)
+ b.appexcache.op = int(op)
+ b.appexcache.id = (*varset << 2) | b.appexcache.op
+ b.quantcache.id = (b.appexcache.id << 3) | cacheidAPPEX
+ b.initref()
+ b.pushref(*n1)
+ b.pushref(*n2)
+ b.pushref(*varset)
+ res := b.appquant(*n1, *n2, *varset)
+ b.popref(3)
+ return b.retnode(res)
+}
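+
+// Illustrative sketch (not in the original file): a relational product, that
+// is, computing Exist(Apply(trans, from, OPand), varset) in a single pass. The
+// nodes trans and from and the slice vars are hypothetical.
+//
+//	varset := b.Makeset(vars)
+//	next := b.AppEx(trans, from, OPand, varset)
+//	// next is equivalent to b.Exist(b.Apply(trans, from, OPand), varset)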
+
+func (b *BDD) appquant(left, right, varset int) int {
+ switch Operator(b.appexcache.op) {
+ case OPand:
+ if left == 0 || right == 0 {
+ return 0
+ }
+ if left == right {
+ return b.quant(left, varset)
+ }
+ if left == 1 {
+ return b.quant(right, varset)
+ }
+ if right == 1 {
+ return b.quant(left, varset)
+ }
+ case OPor:
+ if left == 1 || right == 1 {
+ return 1
+ }
+ if left == right {
+ return b.quant(left, varset)
+ }
+ if left == 0 {
+ return b.quant(right, varset)
+ }
+ if right == 0 {
+ return b.quant(left, varset)
+ }
+ case OPxor:
+ if left == right {
+ return 0
+ }
+ if left == 0 {
+ return b.quant(right, varset)
+ }
+ if right == 0 {
+ return b.quant(left, varset)
+ }
+ case OPnand:
+ if left == 0 || right == 0 {
+ return 1
+ }
+ case OPnor:
+ if left == 1 || right == 1 {
+ return 0
+ }
+ default:
+ // OPnot and OPsimplify should not be used in apply.
+ //
+ // FIXME: we are raising an error for other operations that would be OK.
+ b.seterror("unauthorized operation (%s) in AppEx", b.applycache.op)
+ return -1
+ }
+
+ // we check for errors
+ if left < 0 || right < 0 {
+ b.seterror("unexpected error in appquant")
+ return -1
+ }
+
+ // we deal with the other cases when the two operands are constants
+ if (left < 2) && (right < 2) {
+ return opres[b.appexcache.op][left][right]
+ }
+
+ // and the case where we have no more variables to quantify
+ if (b.level(left) > b.quantlast) && (b.level(right) > b.quantlast) {
+ oldop := b.applycache.op
+ b.applycache.op = b.appexcache.op
+ res := b.apply(left, right)
+ b.applycache.op = oldop
+ return res
+ }
+
+ // next we check if the operation is already in our cache
+ if res := b.matchappex(left, right); res >= 0 {
+ return res
+ }
+ leftlvl := b.level(left)
+ rightlvl := b.level(right)
+ var res int
+ if leftlvl == rightlvl {
+ low := b.pushref(b.appquant(b.low(left), b.low(right), varset))
+ high := b.pushref(b.appquant(b.high(left), b.high(right), varset))
+ if b.quantset[leftlvl] == b.quantsetID {
+ res = b.apply(low, high)
+ } else {
+ res = b.makenode(leftlvl, low, high)
+ }
+ } else {
+ if leftlvl < rightlvl {
+ low := b.pushref(b.appquant(b.low(left), right, varset))
+ high := b.pushref(b.appquant(b.high(left), right, varset))
+ if b.quantset[leftlvl] == b.quantsetID {
+ res = b.apply(low, high)
+ } else {
+ res = b.makenode(leftlvl, low, high)
+ }
+ } else {
+ low := b.pushref(b.appquant(left, b.low(right), varset))
+ high := b.pushref(b.appquant(left, b.high(right), varset))
+ if b.quantset[rightlvl] == b.quantsetID {
+ res = b.apply(low, high)
+ } else {
+ res = b.makenode(rightlvl, low, high)
+ }
+ }
+ }
+ b.popref(2)
+ return b.setappex(left, right, res)
+}
+
+// Replace takes a Replacer and computes the result of n after replacing old
+// variables with new ones. See type Replacer.
+func (b *BDD) Replace(n Node, r Replacer) Node {
+ if b.checkptr(n) != nil {
+ return b.seterror("wrong operand in call to Replace (%d)", *n)
+ }
+ b.initref()
+ b.pushref(*n)
+ b.replacecache.id = r.Id()
+ res := b.retnode(b.replace(*n, r))
+ b.popref(1)
+ return res
+}
+
+func (b *BDD) replace(n int, r Replacer) int {
+ image, ok := r.Replace(b.level(n))
+ if !ok {
+ return n
+ }
+ if res := b.matchreplace(n); res >= 0 {
+ return res
+ }
+ low := b.pushref(b.replace(b.low(n), r))
+ high := b.pushref(b.replace(b.high(n), r))
+ res := b.correctify(image, low, high)
+ b.popref(2)
+ return b.setreplace(n, res)
+}
+
+func (b *BDD) correctify(level int32, low, high int) int {
+ /* FIXME: we do not use the cache here */
+ if (level < b.level(low)) && (level < b.level(high)) {
+ return b.makenode(level, low, high)
+ }
+
+ if (level == b.level(low)) || (level == b.level(high)) {
+ b.seterror("error in replace level (%d) == low (%d:%d) or high (%d:%d)", level, low, b.level(low), high, b.level(high))
+ return -1
+ }
+
+ if b.level(low) == b.level(high) {
+ left := b.pushref(b.correctify(level, b.low(low), b.low(high)))
+ right := b.pushref(b.correctify(level, b.high(low), b.high(high)))
+ res := b.makenode(b.level(low), left, right)
+ b.popref(2)
+ return res
+ }
+
+ if b.level(low) < b.level(high) {
+ left := b.pushref(b.correctify(level, b.low(low), high))
+ right := b.pushref(b.correctify(level, b.high(low), high))
+ res := b.makenode(b.level(low), left, right)
+ b.popref(2)
+ return res
+ }
+
+ left := b.pushref(b.correctify(level, low, b.low(high)))
+ right := b.pushref(b.correctify(level, low, b.high(high)))
+ res := b.makenode(b.level(high), left, right)
+ b.popref(2)
+ return res
+}
+
+// Satcount computes the number of satisfying variable assignments for the
+// function denoted by n. We return a result using arbitrary-precision
+// arithmetic to avoid possible overflows. The result is zero (and we set the
+// error flag of b) if there is an error.
+func (b *BDD) Satcount(n Node) *big.Int {
+ res := big.NewInt(0)
+ if b.checkptr(n) != nil {
+ b.seterror("Wrong operand in call to Satcount (%d)", *n)
+ return res
+ }
+ // We compute 2^level with a bit shift 1 << level
+ res.SetBit(res, int(b.level(*n)), 1)
+ satc := make(map[int]*big.Int)
+ return res.Mul(res, b.satcount(*n, satc))
+}
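+
+// Illustrative sketch (not in the original file): for a BDD b with 3 variables,
+// the function x0 OR x1 is satisfied by 6 of the 8 possible assignments, so:
+//
+//	f := b.Apply(b.Ithvar(0), b.Ithvar(1), OPor)
+//	fmt.Println(b.Satcount(f)) // prints 6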
+
+func (b *BDD) satcount(n int, satc map[int]*big.Int) *big.Int {
+ if n < 2 {
+ return big.NewInt(int64(n))
+ }
+ // we use satc to memoize the value of satcount for each nodes
+ res, ok := satc[n]
+ if ok {
+ return res
+ }
+ level := b.level(n)
+ low := b.low(n)
+ high := b.high(n)
+
+ res = big.NewInt(0)
+ two := big.NewInt(0)
+ two.SetBit(two, int(b.level(low)-level-1), 1)
+ res.Add(res, two.Mul(two, b.satcount(low, satc)))
+ two = big.NewInt(0)
+ two.SetBit(two, int(b.level(high)-level-1), 1)
+ res.Add(res, two.Mul(two, b.satcount(high, satc)))
+ satc[n] = res
+ return res
+}
+
+// Allsat iterates through all legal variable assignments for n and calls the
+// function f on each of them. We pass an int slice of length varnum to f where
+// each entry is 0 if the variable is false, 1 if it is true, or -1 if it is a
+// don't care. We stop and return an error if f returns an error at some
+// point.
+func (b *BDD) Allsat(f func([]int) error, n Node) error {
+ if b.checkptr(n) != nil {
+ return fmt.Errorf("wrong node in call to Allsat (%d)", *n)
+ }
+ prof := make([]int, b.varnum)
+ for k := range prof {
+ prof[k] = -1
+ }
+ // the function does not create new nodes, so we do not need to take care of
+ // possible resizing
+ return b.allsat(*n, prof, f)
+}
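+
+// Illustrative sketch (not in the original file): printing every satisfying
+// assignment of a (hypothetical) node f, one profile per line, where -1 marks a
+// "don't care" variable:
+//
+//	err := b.Allsat(func(profile []int) error {
+//		fmt.Println(profile)
+//		return nil
+//	}, f)
+//	if err != nil {
+//		log.Fatal(err)
+//	}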
+
+func (b *BDD) allsat(n int, prof []int, f func([]int) error) error {
+ if n == 1 {
+ return f(prof)
+ }
+ if n == 0 {
+ return nil
+ }
+
+ if low := b.low(n); low != 0 {
+ prof[b.level(n)] = 0
+ for v := b.level(low) - 1; v > b.level(n); v-- {
+ prof[v] = -1
+ }
+ if err := b.allsat(low, prof, f); err != nil {
+			return err
+ }
+ }
+
+ if high := b.high(n); high != 0 {
+ prof[b.level(n)] = 1
+ for v := b.level(high) - 1; v > b.level(n); v-- {
+ prof[v] = -1
+ }
+ if err := b.allsat(high, prof, f); err != nil {
+			return err
+ }
+ }
+ return nil
+}
+
+// Allnodes applies function f over all the nodes accessible from the nodes in
+// the sequence n..., or all the active nodes if n is absent (len(n) == 0). The
+// parameters to function f are the id, level, and id's of the low and high
+// successors of each node. The two constant nodes (True and False) always have
+// the ids 1 and 0, respectively. The order in which nodes are visited is not
+// specified. The behavior is very similar to the one of Allsat. In particular,
+// we stop the computation and return an error if f returns an error at some
+// point.
+func (b *BDD) Allnodes(f func(id, level, low, high int) error, n ...Node) error {
+ for _, v := range n {
+ if err := b.checkptr(v); err != nil {
+ return fmt.Errorf("wrong node in call to Allnodes; %s", err)
+ }
+ }
+ // the function does not create new nodes, so we do not need to take care of
+ // possible resizing.
+ if len(n) == 0 {
+ // we call f over all active nodes
+ return b.allnodes(f)
+ }
+ return b.allnodesfrom(f, n)
+}
diff --git a/vendor/github.com/dalzilio/rudd/operator.go b/vendor/github.com/dalzilio/rudd/operator.go
new file mode 100644
index 0000000..55a5627
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/operator.go
@@ -0,0 +1,57 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+package rudd
+
+// Operator describe the potential (binary) operations available on an Apply.
+// Only the first four operators (from OPand to OPnand) can be used in AppEx.
+type Operator int
+
+const (
+ OPand Operator = iota
+ OPxor
+ OPor
+ OPnand
+ OPnor
+ OPimp
+ OPbiimp
+ OPdiff
+ OPless
+ OPinvimp
+ // opnot, for negation, is the only unary operation. It should not be used
+ // in Apply
+ opnot
+)
+
+var opnames = [12]string{
+ OPand: "and",
+ OPxor: "xor",
+ OPor: "or",
+ OPnand: "nand",
+ OPnor: "nor",
+ OPimp: "imp",
+ OPbiimp: "biimp",
+ OPdiff: "diff",
+ OPless: "less",
+ OPinvimp: "invimp",
+ opnot: "not",
+}
+
+func (op Operator) String() string {
+ return opnames[op]
+}
+
+var opres = [12][2][2]int{
+ // 00 01 10 11
+ OPand: {0: [2]int{0: 0, 1: 0}, 1: [2]int{0: 0, 1: 1}}, // 0001
+ OPxor: {0: [2]int{0: 0, 1: 1}, 1: [2]int{0: 1, 1: 0}}, // 0110
+ OPor: {0: [2]int{0: 0, 1: 1}, 1: [2]int{0: 1, 1: 1}}, // 0111
+ OPnand: {0: [2]int{0: 1, 1: 1}, 1: [2]int{0: 1, 1: 0}}, // 1110
+ OPnor: {0: [2]int{0: 1, 1: 0}, 1: [2]int{0: 0, 1: 0}}, // 1000
+ OPimp: {0: [2]int{0: 1, 1: 1}, 1: [2]int{0: 0, 1: 1}}, // 1101
+ OPbiimp: {0: [2]int{0: 1, 1: 0}, 1: [2]int{0: 0, 1: 1}}, // 1001
+ OPdiff: {0: [2]int{0: 0, 1: 0}, 1: [2]int{0: 1, 1: 0}}, // 0010
+ OPless: {0: [2]int{0: 0, 1: 1}, 1: [2]int{0: 0, 1: 0}}, // 0100
+ OPinvimp: {0: [2]int{0: 1, 1: 0}, 1: [2]int{0: 1, 1: 1}}, // 1011
+}
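+
+// Illustrative note (not in the original file): opres is indexed first by the
+// operator, then by the value of the left and right constant operands. For
+// instance, opres[OPimp][0][1] == 1 reflects that (false => true) is true.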
diff --git a/vendor/github.com/dalzilio/rudd/out.dot b/vendor/github.com/dalzilio/rudd/out.dot
new file mode 100644
index 0000000..0b049cc
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/out.dot
@@ -0,0 +1,14 @@
+digraph G {
+1 [shape=box, label="1", style=filled, shape=box, height=0.3, width=0.3];
+10 [label=<
+ <FONT POINT-SIZE="20">4</FONT>
+ <FONT POINT-SIZE="10">[10]</FONT>
+>];
+10 -> 1 [style=filled];
+19 [label=<
+ <FONT POINT-SIZE="20">1</FONT>
+ <FONT POINT-SIZE="10">[19]</FONT>
+>];
+19 -> 10 [style=dotted];
+19 -> 1 [style=filled];
+}
diff --git a/vendor/github.com/dalzilio/rudd/primes.go b/vendor/github.com/dalzilio/rudd/primes.go
new file mode 100644
index 0000000..d0bb2f6
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/primes.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+package rudd
+
+import "math/big"
+
+// functions for Prime number calculations
+
+func hasFactor(src int, n int) bool {
+ if (src != n) && (src%n == 0) {
+ return true
+ }
+ return false
+}
+
+func hasEasyFactors(src int) bool {
+ return hasFactor(src, 3) || hasFactor(src, 5) || hasFactor(src, 7) || hasFactor(src, 11) || hasFactor(src, 13)
+}
+
+func primeGte(src int) int {
+ if src%2 == 0 {
+ src++
+ }
+
+ for {
+ if hasEasyFactors(src) {
+ src = src + 2
+ continue
+ }
+ // ProbablyPrime is 100% accurate for inputs less than 2⁶⁴.
+ if big.NewInt(int64(src)).ProbablyPrime(0) {
+ return src
+ }
+ src = src + 2
+ }
+}
+
+func primeLte(src int) int {
+ if src == 0 {
+ return 1
+ }
+
+ if src%2 == 0 {
+ src--
+ }
+
+ for {
+ if hasEasyFactors(src) {
+ src = src - 2
+ continue
+ }
+ // ProbablyPrime is 100% accurate for inputs less than 2⁶⁴.
+ if big.NewInt(int64(src)).ProbablyPrime(0) {
+ return src
+ }
+ src = src - 2
+ }
+}
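+
+// Illustrative note (not in the original file): these helpers are used
+// elsewhere in the package, presumably when sizing hash tables. For instance,
+// primeGte(1000) returns 1009 and primeLte(1000) returns 997, the closest
+// primes above and below 1000.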
diff --git a/vendor/github.com/dalzilio/rudd/replace.go b/vendor/github.com/dalzilio/rudd/replace.go
new file mode 100644
index 0000000..6a33658
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/replace.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+package rudd
+
+import (
+ "fmt"
+ "math"
+)
+
+var _REPLACEID = 1
+
+// Replacer is the type of substitution objects used in a Replace operation,
+// which substitutes variables in a BDD "function". The only method returning an
+// object of this type is the NewReplacer method. The result obtained when using
+// a replacer created from one BDD in a Replace operation over a different BDD
+// is unspecified.
+type Replacer interface {
+ Replace(int32) (int32, bool)
+ Id() int
+}
+
+type replacer struct {
+ id int // unique identifier used for caching intermediate results
+ image []int32 // map the level of old variables to the level of new variables
+ last int32 // last index in the Replacer, to speed up computations
+}
+
+func (r *replacer) String() string {
+ res := fmt.Sprintf("replacer(last: %d)[", r.last)
+ first := true
+ for k, v := range r.image {
+ if k != int(v) {
+ if !first {
+ res += ", "
+ }
+ first = false
+ res += fmt.Sprintf("%d<-%d", k, v)
+ }
+ }
+ return res + "]"
+}
+
+func (r *replacer) Replace(level int32) (int32, bool) {
+ if level > r.last {
+ return level, false
+ }
+ return r.image[level], true
+}
+
+func (r *replacer) Id() int {
+ return r.id
+}
+
+// NewReplacer returns a Replacer that can be used for substituting variable
+// oldvars[k] with newvars[k] in the BDD b. We return an error if the two slices
+// do not have the same length or if we find the same index twice in either of
+// them. All values must be in the interval [0..Varnum).
+func (b *BDD) NewReplacer(oldvars, newvars []int) (Replacer, error) {
+ res := &replacer{}
+ if len(oldvars) != len(newvars) {
+ return nil, fmt.Errorf("unmatched length of slices")
+ }
+ if _REPLACEID == (math.MaxInt32 >> 2) {
+ return nil, fmt.Errorf("too many replacers created")
+ }
+ res.id = (_REPLACEID << 2) | cacheidREPLACE
+ _REPLACEID++
+ varnum := b.Varnum()
+ support := make([]bool, varnum)
+ res.image = make([]int32, varnum)
+ for k := range res.image {
+ res.image[k] = int32(k)
+ }
+ for k, v := range oldvars {
+ if support[v] {
+ return nil, fmt.Errorf("duplicate variable (%d) in oldvars", v)
+ }
+ if v >= varnum {
+ return nil, fmt.Errorf("invalid variable in oldvars (%d)", v)
+ }
+ if newvars[k] >= varnum {
+ return nil, fmt.Errorf("invalid variable in newvars (%d)", v)
+ }
+ support[v] = true
+ res.image[v] = int32(newvars[k])
+ if int32(v) > res.last {
+ res.last = int32(v)
+ }
+ }
+ for _, v := range newvars {
+ if int(res.image[v]) != v {
+ return nil, fmt.Errorf("variable in newvars (%d) also occur in oldvars", v)
+ }
+ }
+ return res, nil
+}
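+
+// Illustrative sketch (not in the original file): renaming variables 0 and 1 to
+// variables 2 and 3 in a (hypothetical) node f of a BDD with at least four
+// variables:
+//
+//	r, err := b.NewReplacer([]int{0, 1}, []int{2, 3})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	g := b.Replace(f, r)
+//	// g is f with x0 and x1 substituted by x2 and x3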
diff --git a/vendor/github.com/dalzilio/rudd/stdio.go b/vendor/github.com/dalzilio/rudd/stdio.go
new file mode 100644
index 0000000..05686f8
--- /dev/null
+++ b/vendor/github.com/dalzilio/rudd/stdio.go
@@ -0,0 +1,124 @@
+// Copyright (c) 2021 Silvano DAL ZILIO
+//
+// MIT License
+
+package rudd
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "text/tabwriter"
+)
+
+var hsizes []string = []string{"k", "M", "G"}
+
+// humanSize returns a human-readable version of a size in bytes
+func humanSize(b int, unit uintptr) string {
+ b = b * int(unit)
+ if b < 0 {
+ return "(error)"
+ }
+ if b < 1000 {
+ return fmt.Sprintf("%d B", b)
+ }
+ c := 0
+ s := float64(b) / 1000.0
+ for {
+ if s < 1000 {
+ return fmt.Sprintf("%.1f %sB", s, hsizes[c])
+ }
+ s = float64(s) / 1000.0
+ c++
+ if c >= len(hsizes) {
+ return fmt.Sprintf("%.1f TB", s)
+ }
+ }
+}
+
+// Print writes a textual representation of the BDD with roots in n to an output
+// stream. The result is a table with rows giving the levels and ids of the
+// nodes and their low and high parts.
+//
+// We print all the nodes in b if n is absent (len(n) == 0), so a call to
+// b.Print(os.Stdout) prints a table containing all the active nodes of the BDD
+// to the standard output. We also simply print the strings "True" and "False",
+// respectively, if len(n) == 1 and n[0] is the constant True or False.
+func (b *BDD) Print(w io.Writer, n ...Node) {
+ if mesg := b.Error(); mesg != "" {
+ fmt.Fprintf(w, "Error: %s\n", mesg)
+ return
+ }
+ if len(n) == 1 && n[0] != nil {
+ if *n[0] == 0 {
+ fmt.Fprintln(w, "False")
+ return
+ }
+ if *n[0] == 1 {
+ fmt.Fprintln(w, "True")
+ return
+ }
+ }
+ // we build a slice of nodes sorted by ids
+ nodes := make([][4]int, 0)
+ err := b.Allnodes(func(id, level, low, high int) error {
+ i := sort.Search(len(nodes), func(i int) bool {
+ return nodes[i][0] >= id
+ })
+ nodes = append(nodes, [4]int{})
+ copy(nodes[i+1:], nodes[i:])
+ nodes[i] = [4]int{id, level, low, high}
+ return nil
+ }, n...)
+ if err != nil {
+ fmt.Fprintln(w, err.Error())
+ return
+ }
+ printSet(w, nodes)
+}
+
+func printSet(w io.Writer, nodes [][4]int) {
+ tw := tabwriter.NewWriter(w, 0, 0, 0, ' ', 0)
+ for _, n := range nodes {
+ if n[0] > 1 {
+ fmt.Fprintf(tw, "%d\t[%d\t] ? \t%d\t : %d\n", n[0], n[1], n[2], n[3])
+ }
+ }
+ tw.Flush()
+}
+
+// Dot writes a graph-like description of the BDD with roots in n to an output
+// stream using Graphviz's dot format. The behavior of Dot is very similar to
+// the one of Print. In particular, we include all the active nodes of b if n is
+// absent (len(n) == 0).
+func (b *BDD) Dot(w io.Writer, n ...Node) error {
+ if mesg := b.Error(); mesg != "" {
+ fmt.Fprintf(w, "Error: %s\n", mesg)
+		return fmt.Errorf("%s", mesg)
+ }
+ // we write the result by visiting each node but we never draw edges to the
+ // False constant.
+ fmt.Fprintln(w, "digraph G {")
+ fmt.Fprintln(w, "1 [shape=box, label=\"1\", style=filled, shape=box, height=0.3, width=0.3];")
+ _ = b.Allnodes(func(id, level, low, high int) error {
+ if id > 1 {
+ fmt.Fprintf(w, "%d %s\n", id, dotlabel(id, level))
+ if low != 0 {
+ fmt.Fprintf(w, "%d -> %d [style=dotted];\n", id, low)
+ }
+ if high != 0 {
+ fmt.Fprintf(w, "%d -> %d [style=filled];\n", id, high)
+ }
+ }
+ return nil
+ }, n...)
+ fmt.Fprintln(w, "}")
+ return nil
+}
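+
+// Illustrative sketch (not in the original file): writing the graph of a
+// (hypothetical) node f to a file that can then be rendered with Graphviz
+// (e.g. dot -Tpdf out.dot > out.pdf):
+//
+//	outfile, err := os.Create("out.dot")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer outfile.Close()
+//	if err := b.Dot(outfile, f); err != nil {
+//		log.Fatal(err)
+//	}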
+
+func dotlabel(id int, level int) string {
+	return fmt.Sprintf(`[label=<
+	<FONT POINT-SIZE="20">%d</FONT>
+	<FONT POINT-SIZE="10">[%d]</FONT>
+>];`, level, id)
+}