42
vendor/go.starlark.net/starlark/debug.go
generated
vendored
Normal file
42
vendor/go.starlark.net/starlark/debug.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
package starlark
|
||||
|
||||
import "go.starlark.net/syntax"
|
||||
|
||||
// This file defines an experimental API for the debugging tools.
|
||||
// Some of these declarations expose details of internal packages.
|
||||
// (The debugger makes liberal use of exported fields of unexported types.)
|
||||
// Breaking changes may occur without notice.
|
||||
|
||||
// Local returns the value of the i'th local variable.
// It may be nil if not yet assigned.
//
// Local may be called only for frames whose Callable is a *Function (a
// function defined by Starlark source code), and only while the frame
// is active; it will panic otherwise.
//
// This function is provided only for debugging tools.
//
// THIS API IS EXPERIMENTAL AND MAY CHANGE WITHOUT NOTICE.
func (fr *frame) Local(i int) Value { return fr.locals[i] }
|
||||
|
||||
// DebugFrame is the debugger API for a frame of the interpreter's call stack.
//
// Most applications have no need for this API; use CallFrame instead.
//
// Clients must not retain a DebugFrame nor call any of its methods once
// the current built-in call has returned or execution has resumed
// after a breakpoint as this may have unpredictable effects, including
// but not limited to retention of objects that would otherwise be garbage.
type DebugFrame interface {
	Callable() Callable        // returns the frame's function
	Local(i int) Value         // returns the value of the (Starlark) frame's ith local variable
	Position() syntax.Position // returns the current position of execution in this frame
}
|
||||
|
||||
// DebugFrame returns the debugger interface for
// the specified frame of the interpreter's call stack.
// Frame numbering is as for Thread.CallFrame.
//
// This function is intended for use in debugging tools.
// Most applications should have no need for it; use CallFrame instead.
func (thread *Thread) DebugFrame(depth int) DebugFrame { return thread.frameAt(depth) }
|
||||
3
vendor/go.starlark.net/starlark/empty.s
generated
vendored
Normal file
3
vendor/go.starlark.net/starlark/empty.s
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
// The presence of this file allows the package to use the
|
||||
// "go:linkname" hack to call non-exported functions in the
|
||||
// Go runtime, such as hardware-accelerated string hashing.
|
||||
1497
vendor/go.starlark.net/starlark/eval.go
generated
vendored
Normal file
1497
vendor/go.starlark.net/starlark/eval.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
373
vendor/go.starlark.net/starlark/hashtable.go
generated
vendored
Normal file
373
vendor/go.starlark.net/starlark/hashtable.go
generated
vendored
Normal file
@@ -0,0 +1,373 @@
|
||||
// Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package starlark
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
_ "unsafe" // for go:linkname hack
|
||||
)
|
||||
|
||||
// hashtable is used to represent Starlark dict and set values.
// It is a hash table whose key/value entries form a doubly-linked list
// in the order the entries were inserted.
type hashtable struct {
	table     []bucket  // len is zero or a power of two
	bucket0   [1]bucket // inline allocation for small maps.
	len       uint32
	itercount uint32  // number of active iterators (ignored if frozen)
	head      *entry  // insertion order doubly-linked list; may be nil
	tailLink  **entry // address of nil link at end of list (perhaps &head)
	frozen    bool    // no further mutation allowed once set (see freeze)
}
|
||||
|
||||
// bucketSize is the number of entries stored inline in each bucket.
const bucketSize = 8

// A bucket holds up to bucketSize entries; overflow chains through next.
type bucket struct {
	entries [bucketSize]entry
	next    *bucket // linked list of buckets
}

// An entry is a single key/value pair.
// An entry with hash == 0 is unused.
type entry struct {
	hash       uint32 // nonzero => in use
	key, value Value
	next       *entry  // insertion order doubly-linked list; may be nil
	prevLink   **entry // address of link to this entry (perhaps &head)
}
|
||||
|
||||
func (ht *hashtable) init(size int) {
|
||||
if size < 0 {
|
||||
panic("size < 0")
|
||||
}
|
||||
nb := 1
|
||||
for overloaded(size, nb) {
|
||||
nb = nb << 1
|
||||
}
|
||||
if nb < 2 {
|
||||
ht.table = ht.bucket0[:1]
|
||||
} else {
|
||||
ht.table = make([]bucket, nb)
|
||||
}
|
||||
ht.tailLink = &ht.head
|
||||
}
|
||||
|
||||
func (ht *hashtable) freeze() {
|
||||
if !ht.frozen {
|
||||
ht.frozen = true
|
||||
for i := range ht.table {
|
||||
for p := &ht.table[i]; p != nil; p = p.next {
|
||||
for i := range p.entries {
|
||||
e := &p.entries[i]
|
||||
if e.hash != 0 {
|
||||
e.key.Freeze()
|
||||
e.value.Freeze()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// insert adds or updates the mapping k→v. It returns an error if k is
// unhashable, if equality testing of keys fails, or if the table cannot
// currently be mutated (frozen, or under active iteration). A new key
// is appended to the insertion-order list; an existing key has its
// value overwritten in place.
func (ht *hashtable) insert(k, v Value) error {
	if ht.frozen {
		return fmt.Errorf("cannot insert into frozen hash table")
	}
	if ht.itercount > 0 {
		return fmt.Errorf("cannot insert into hash table during iteration")
	}
	if ht.table == nil {
		ht.init(1) // lazy initialization on first insert
	}
	h, err := k.Hash()
	if err != nil {
		return err
	}
	if h == 0 {
		h = 1 // zero is reserved to mark unused entries
	}

retry:
	var insert *entry // first free slot observed, if any

	// Inspect each bucket in the bucket list.
	// len(ht.table) is a power of two, so the mask selects a bucket.
	p := &ht.table[h&(uint32(len(ht.table)-1))]
	for {
		for i := range p.entries {
			e := &p.entries[i]
			if e.hash != h {
				if e.hash == 0 {
					// Found empty entry; make a note.
					insert = e
				}
				continue
			}
			if eq, err := Equal(k, e.key); err != nil {
				return err // e.g. excessively recursive tuple
			} else if !eq {
				continue
			}
			// Key already present; update value.
			e.value = v
			return nil
		}
		if p.next == nil {
			break
		}
		p = p.next
	}

	// Key not found. p points to the last bucket.

	// Does the number of elements exceed the buckets' load factor?
	if overloaded(int(ht.len), len(ht.table)) {
		ht.grow()
		goto retry // rehashed: the key's bucket may have changed
	}

	if insert == nil {
		// No space in existing buckets. Add a new one to the bucket list.
		b := new(bucket)
		p.next = b
		insert = &b.entries[0]
	}

	// Insert key/value pair.
	insert.hash = h
	insert.key = k
	insert.value = v

	// Append entry to doubly-linked list.
	insert.prevLink = ht.tailLink
	*ht.tailLink = insert
	ht.tailLink = &insert.next

	ht.len++

	return nil
}
|
||||
|
||||
func overloaded(elems, buckets int) bool {
|
||||
const loadFactor = 6.5 // just a guess
|
||||
return elems >= bucketSize && float64(elems) >= loadFactor*float64(buckets)
|
||||
}
|
||||
|
||||
// grow doubles the number of buckets and rehashes every entry,
// preserving insertion order by re-inserting along the linked list.
func (ht *hashtable) grow() {
	// Double the number of buckets and rehash.
	// TODO(adonovan): opt:
	// - avoid reentrant calls to ht.insert, and specialize it.
	//   e.g. we know the calls to Equals will return false since
	//   there are no duplicates among the old keys.
	// - saving the entire hash in the bucket would avoid the need to
	//   recompute the hash.
	// - save the old buckets on a free list.
	ht.table = make([]bucket, len(ht.table)<<1)
	oldhead := ht.head
	ht.head = nil
	ht.tailLink = &ht.head
	ht.len = 0
	for e := oldhead; e != nil; e = e.next {
		ht.insert(e.key, e.value)
	}
	ht.bucket0[0] = bucket{} // clear out unused initial bucket
}
|
||||
|
||||
// lookup returns the value associated with key k and reports whether it
// was found. A non-nil error indicates that k is unhashable or that key
// equality testing failed (e.g. an excessively recursive tuple).
func (ht *hashtable) lookup(k Value) (v Value, found bool, err error) {
	h, err := k.Hash()
	if err != nil {
		return nil, false, err // unhashable
	}
	if h == 0 {
		h = 1 // zero is reserved
	}
	if ht.table == nil {
		return None, false, nil // empty
	}

	// Inspect each bucket in the bucket list.
	for p := &ht.table[h&(uint32(len(ht.table)-1))]; p != nil; p = p.next {
		for i := range p.entries {
			e := &p.entries[i]
			if e.hash == h {
				if eq, err := Equal(k, e.key); err != nil {
					return nil, false, err // e.g. excessively recursive tuple
				} else if eq {
					return e.value, true, nil // found
				}
			}
		}
	}
	return None, false, nil // not found
}
|
||||
|
||||
// items returns all the items in the map (as key/value pairs) in insertion order.
func (ht *hashtable) items() []Tuple {
	items := make([]Tuple, 0, ht.len)
	array := make([]Value, ht.len*2) // allocate a single backing array
	for e := ht.head; e != nil; e = e.next {
		pair := Tuple(array[:2:2]) // full-capacity slice so pairs can't stomp each other on append
		array = array[2:]
		pair[0] = e.key
		pair[1] = e.value
		items = append(items, pair)
	}
	return items
}
|
||||
|
||||
func (ht *hashtable) first() (Value, bool) {
|
||||
if ht.head != nil {
|
||||
return ht.head.key, true
|
||||
}
|
||||
return None, false
|
||||
}
|
||||
|
||||
func (ht *hashtable) keys() []Value {
|
||||
keys := make([]Value, 0, ht.len)
|
||||
for e := ht.head; e != nil; e = e.next {
|
||||
keys = append(keys, e.key)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// delete removes the entry with key k, returning its value and whether
// it was present. It fails if the table cannot currently be mutated
// (frozen, or under active iteration) or if k is unhashable.
func (ht *hashtable) delete(k Value) (v Value, found bool, err error) {
	if ht.frozen {
		return nil, false, fmt.Errorf("cannot delete from frozen hash table")
	}
	if ht.itercount > 0 {
		return nil, false, fmt.Errorf("cannot delete from hash table during iteration")
	}
	if ht.table == nil {
		return None, false, nil // empty
	}
	h, err := k.Hash()
	if err != nil {
		return nil, false, err // unhashable
	}
	if h == 0 {
		h = 1 // zero is reserved
	}

	// Inspect each bucket in the bucket list.
	for p := &ht.table[h&(uint32(len(ht.table)-1))]; p != nil; p = p.next {
		for i := range p.entries {
			e := &p.entries[i]
			if e.hash == h {
				if eq, err := Equal(k, e.key); err != nil {
					return nil, false, err
				} else if eq {
					// Remove e from doubly-linked list.
					*e.prevLink = e.next
					if e.next == nil {
						ht.tailLink = e.prevLink // deletion of last entry
					} else {
						e.next.prevLink = e.prevLink
					}

					v := e.value
					*e = entry{} // zero the slot: hash==0 marks it free for reuse
					ht.len--
					return v, true, nil // found
				}
			}
		}
	}

	// TODO(adonovan): opt: remove completely empty bucket from bucket list.

	return None, false, nil // not found
}
|
||||
|
||||
func (ht *hashtable) clear() error {
|
||||
if ht.frozen {
|
||||
return fmt.Errorf("cannot clear frozen hash table")
|
||||
}
|
||||
if ht.itercount > 0 {
|
||||
return fmt.Errorf("cannot clear hash table during iteration")
|
||||
}
|
||||
if ht.table != nil {
|
||||
for i := range ht.table {
|
||||
ht.table[i] = bucket{}
|
||||
}
|
||||
}
|
||||
ht.head = nil
|
||||
ht.tailLink = &ht.head
|
||||
ht.len = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// dump is provided as an aid to debugging. It prints the table's
// internal structure (bucket chains, entries, and list links) to stdout.
func (ht *hashtable) dump() {
	fmt.Printf("hashtable %p len=%d head=%p tailLink=%p",
		ht, ht.len, ht.head, ht.tailLink)
	if ht.tailLink != nil {
		fmt.Printf(" *tailLink=%p", *ht.tailLink)
	}
	fmt.Println()
	for j := range ht.table {
		fmt.Printf("bucket chain %d\n", j)
		for p := &ht.table[j]; p != nil; p = p.next {
			fmt.Printf("bucket %p\n", p)
			for i := range p.entries {
				e := &p.entries[i]
				fmt.Printf("\tentry %d @ %p hash=%d key=%v value=%v\n",
					i, e, e.hash, e.key, e.value)
				fmt.Printf("\t\tnext=%p &next=%p prev=%p",
					e.next, &e.next, e.prevLink)
				if e.prevLink != nil {
					fmt.Printf(" *prev=%p", *e.prevLink)
				}
				fmt.Println()
			}
		}
	}
}
|
||||
|
||||
// iterate returns a new iterator over the keys in insertion order.
// Unless the table is frozen (hence immutable), the active-iterator
// count is incremented to block mutation until Done is called.
func (ht *hashtable) iterate() *keyIterator {
	if !ht.frozen {
		ht.itercount++
	}
	return &keyIterator{ht: ht, e: ht.head}
}
|
||||
|
||||
// A keyIterator walks a hashtable's entries in insertion order.
type keyIterator struct {
	ht *hashtable
	e  *entry // next entry to yield; nil when exhausted
}
|
||||
|
||||
func (it *keyIterator) Next(k *Value) bool {
|
||||
if it.e != nil {
|
||||
*k = it.e.key
|
||||
it.e = it.e.next
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (it *keyIterator) Done() {
|
||||
if !it.ht.frozen {
|
||||
it.ht.itercount--
|
||||
}
|
||||
}
|
||||
|
||||
// hashString computes the hash of s.
// Longer strings are delegated to the runtime's optimized hash;
// short strings use the portable software FNV hash.
func hashString(s string) uint32 {
	if len(s) >= 12 {
		// Call the Go runtime's optimized hash implementation,
		// which uses the AESENC instruction on amd64 machines.
		return uint32(goStringHash(s, 0))
	}
	return softHashString(s)
}
|
||||
|
||||
// goStringHash is the Go runtime's non-exported string hash,
// reached via the go:linkname hack (enabled by empty.s).
//
//go:linkname goStringHash runtime.stringHash
func goStringHash(s string, seed uintptr) uintptr
|
||||
|
||||
// softHashString computes the 32-bit FNV hash of s in software,
// with no runtime assistance: each byte is XORed into the
// accumulator, which is then multiplied by the FNV prime.
func softHashString(s string) uint32 {
	var h uint32
	for _, b := range []byte(s) {
		h ^= uint32(b)
		h *= 16777619
	}
	return h
}
|
||||
350
vendor/go.starlark.net/starlark/int.go
generated
vendored
Normal file
350
vendor/go.starlark.net/starlark/int.go
generated
vendored
Normal file
@@ -0,0 +1,350 @@
|
||||
// Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package starlark
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"go.starlark.net/syntax"
|
||||
)
|
||||
|
||||
// Int is the type of a Starlark int.
// Exactly one representation is active: small when the value fits in
// int32, otherwise big.
type Int struct {
	// We use only the signed 32 bit range of small to ensure
	// that small+small and small*small do not overflow.

	small int64    // minint32 <= small <= maxint32
	big   *big.Int // big != nil <=> value is not representable as int32
}
||||
|
||||
// newBig allocates a new big.Int holding x.
func newBig(x int64) *big.Int {
	if 0 <= x && int64(big.Word(x)) == x {
		// x is guaranteed to fit into a single big.Word.
		// Most starlark ints are small,
		// but math/big assumes that since you've chosen to use math/big,
		// your big.Ints will probably grow, so it over-allocates.
		// Avoid that over-allocation by manually constructing a single-word slice.
		// See https://golang.org/cl/150999, which will hopefully land in Go 1.13.
		return new(big.Int).SetBits([]big.Word{big.Word(x)})
	}
	return big.NewInt(x)
}
|
||||
|
||||
// MakeInt returns a Starlark int for the specified signed integer.
func MakeInt(x int) Int { return MakeInt64(int64(x)) }

// MakeInt64 returns a Starlark int for the specified int64.
// Values within the int32 range use the compact small representation.
func MakeInt64(x int64) Int {
	if math.MinInt32 <= x && x <= math.MaxInt32 {
		return Int{small: x}
	}
	return Int{big: newBig(x)}
}

// MakeUint returns a Starlark int for the specified unsigned integer.
func MakeUint(x uint) Int { return MakeUint64(uint64(x)) }

// MakeUint64 returns a Starlark int for the specified uint64.
func MakeUint64(x uint64) Int {
	if x <= math.MaxInt32 {
		return Int{small: int64(x)}
	}
	if uint64(big.Word(x)) == x {
		// See comment in newBig for an explanation of this optimization.
		return Int{big: new(big.Int).SetBits([]big.Word{big.Word(x)})}
	}
	return Int{big: new(big.Int).SetUint64(x)}
}
|
||||
|
||||
// MakeBigInt returns a Starlark int for the specified big.Int.
// The caller must not subsequently modify x.
func MakeBigInt(x *big.Int) Int {
	// Values that fit in int32 — including MinInt32, whose magnitude
	// needs a full 32 bits — use the small representation.
	if n := x.BitLen(); n < 32 || n == 32 && x.Int64() == math.MinInt32 {
		return Int{small: x.Int64()}
	}
	return Int{big: x}
}
|
||||
|
||||
var (
	// Frequently used constants in the small representation.
	zero, one = Int{small: 0}, Int{small: 1}
	oneBig    = newBig(1)

	// Static assertion that Int supports unary operators.
	_ HasUnary = Int{}
)
|
||||
|
||||
// Unary implements the operations +int, -int, and ~int.
// For any other token it returns (nil, nil), indicating the
// operation is not handled here.
func (i Int) Unary(op syntax.Token) (Value, error) {
	switch op {
	case syntax.MINUS:
		return zero.Sub(i), nil
	case syntax.PLUS:
		return i, nil
	case syntax.TILDE:
		return i.Not(), nil
	}
	return nil, nil
}
|
||||
|
||||
// Int64 returns the value as an int64.
// If it is not exactly representable the result is undefined and ok is false.
func (i Int) Int64() (_ int64, ok bool) {
	if i.big != nil {
		x, acc := bigintToInt64(i.big)
		if acc != big.Exact {
			return // inexact
		}
		return x, true
	}
	// The small representation always fits.
	return i.small, true
}
|
||||
|
||||
// BigInt returns the value as a big.Int.
// The returned variable must not be modified by the client.
func (i Int) BigInt() *big.Int {
	if i.big != nil {
		return i.big
	}
	// Small values get a freshly allocated big.Int.
	return newBig(i.small)
}
|
||||
|
||||
// Uint64 returns the value as a uint64.
// If it is not exactly representable the result is undefined and ok is false.
func (i Int) Uint64() (_ uint64, ok bool) {
	if i.big != nil {
		x, acc := bigintToUint64(i.big)
		if acc != big.Exact {
			return // inexact
		}
		return x, true
	}
	if i.small < 0 {
		return // inexact
	}
	return uint64(i.small), true
}
|
||||
|
||||
// bigintToInt64 converts i to an int64, saturating at the int64 bounds
// and reporting the direction of rounding via big.Accuracy.
// The math/big API should provide this function.
func bigintToInt64(i *big.Int) (int64, big.Accuracy) {
	sign := i.Sign()
	if sign > 0 {
		if i.Cmp(maxint64) > 0 {
			return math.MaxInt64, big.Below
		}
	} else if sign < 0 {
		if i.Cmp(minint64) < 0 {
			return math.MinInt64, big.Above
		}
	}
	return i.Int64(), big.Exact
}
|
||||
|
||||
// bigintToUint64 converts i to a uint64, saturating at the uint64
// bounds and reporting the direction of rounding via big.Accuracy.
// The math/big API should provide this function.
func bigintToUint64(i *big.Int) (uint64, big.Accuracy) {
	sign := i.Sign()
	if sign > 0 {
		if i.BitLen() > 64 {
			return math.MaxUint64, big.Below
		}
	} else if sign < 0 {
		// Negative values round up to zero.
		return 0, big.Above
	}
	return i.Uint64(), big.Exact
}
|
||||
|
||||
// Bounds of int64 as big.Ints, for the range checks in bigintToInt64.
var (
	minint64 = new(big.Int).SetInt64(math.MinInt64)
	maxint64 = new(big.Int).SetInt64(math.MaxInt64)
)
|
||||
|
||||
// Format implements fmt.Formatter by delegating to big.Int formatting,
// so all numeric verbs behave uniformly for both representations.
func (i Int) Format(s fmt.State, ch rune) {
	if i.big != nil {
		i.big.Format(s, ch)
		return
	}
	newBig(i.small).Format(s, ch)
}

// String returns the decimal representation of the value.
func (i Int) String() string {
	if i.big != nil {
		return i.big.Text(10)
	}
	return strconv.FormatInt(i.small, 10)
}
func (i Int) Type() string { return "int" }
func (i Int) Freeze() {} // immutable
func (i Int) Truth() Bool { return i.Sign() != 0 }

// Hash mixes the low-order word of the magnitude; a non-nil big
// value is outside the int32 range and so has at least one word.
func (i Int) Hash() (uint32, error) {
	var lo big.Word
	if i.big != nil {
		lo = i.big.Bits()[0]
	} else {
		lo = big.Word(i.small)
	}
	return 12582917 * uint32(lo+3), nil
}
|
||||
// CompareSameType implements ordered comparison of two Int values.
// v must be an Int; depth is unused (ints contain no subvalues).
func (x Int) CompareSameType(op syntax.Token, v Value, depth int) (bool, error) {
	y := v.(Int)
	if x.big != nil || y.big != nil {
		return threeway(op, x.BigInt().Cmp(y.BigInt())), nil
	}
	// Both smalls lie in the int32 range, so the subtraction cannot
	// overflow int64.
	return threeway(op, signum64(x.small-y.small)), nil
}
|
||||
|
||||
// Float returns the float value nearest i.
func (i Int) Float() Float {
	if i.big != nil {
		// Accuracy is deliberately discarded: nearest is good enough.
		f, _ := new(big.Float).SetInt(i.big).Float64()
		return Float(f)
	}
	return Float(i.small)
}
|
||||
|
||||
func (x Int) Sign() int {
|
||||
if x.big != nil {
|
||||
return x.big.Sign()
|
||||
}
|
||||
return signum64(x.small)
|
||||
}
|
||||
|
||||
func (x Int) Add(y Int) Int {
|
||||
if x.big != nil || y.big != nil {
|
||||
return MakeBigInt(new(big.Int).Add(x.BigInt(), y.BigInt()))
|
||||
}
|
||||
return MakeInt64(x.small + y.small)
|
||||
}
|
||||
func (x Int) Sub(y Int) Int {
|
||||
if x.big != nil || y.big != nil {
|
||||
return MakeBigInt(new(big.Int).Sub(x.BigInt(), y.BigInt()))
|
||||
}
|
||||
return MakeInt64(x.small - y.small)
|
||||
}
|
||||
func (x Int) Mul(y Int) Int {
|
||||
if x.big != nil || y.big != nil {
|
||||
return MakeBigInt(new(big.Int).Mul(x.BigInt(), y.BigInt()))
|
||||
}
|
||||
return MakeInt64(x.small * y.small)
|
||||
}
|
||||
func (x Int) Or(y Int) Int {
|
||||
if x.big != nil || y.big != nil {
|
||||
return Int{big: new(big.Int).Or(x.BigInt(), y.BigInt())}
|
||||
}
|
||||
return Int{small: x.small | y.small}
|
||||
}
|
||||
func (x Int) And(y Int) Int {
|
||||
if x.big != nil || y.big != nil {
|
||||
return MakeBigInt(new(big.Int).And(x.BigInt(), y.BigInt()))
|
||||
}
|
||||
return Int{small: x.small & y.small}
|
||||
}
|
||||
func (x Int) Xor(y Int) Int {
|
||||
if x.big != nil || y.big != nil {
|
||||
return MakeBigInt(new(big.Int).Xor(x.BigInt(), y.BigInt()))
|
||||
}
|
||||
return Int{small: x.small ^ y.small}
|
||||
}
|
||||
func (x Int) Not() Int {
|
||||
if x.big != nil {
|
||||
return MakeBigInt(new(big.Int).Not(x.big))
|
||||
}
|
||||
return Int{small: ^x.small}
|
||||
}
|
||||
func (x Int) Lsh(y uint) Int { return MakeBigInt(new(big.Int).Lsh(x.BigInt(), y)) }
|
||||
func (x Int) Rsh(y uint) Int { return MakeBigInt(new(big.Int).Rsh(x.BigInt(), y)) }
|
||||
|
||||
// Div returns the quotient of x divided by y, rounding toward
// negative infinity (floor division, as in Python).
// Precondition: y is nonzero.
func (x Int) Div(y Int) Int {
	// http://python-history.blogspot.com/2010/08/why-pythons-integer-division-floors.html
	if x.big != nil || y.big != nil {
		xb, yb := x.BigInt(), y.BigInt()

		var quo, rem big.Int
		quo.QuoRem(xb, yb, &rem)
		// QuoRem truncates toward zero; when the operands differ in
		// sign and the division was inexact, adjust downward.
		if (xb.Sign() < 0) != (yb.Sign() < 0) && rem.Sign() != 0 {
			quo.Sub(&quo, oneBig)
		}
		return MakeBigInt(&quo)
	}
	quo := x.small / y.small
	rem := x.small % y.small
	// Same truncation-to-floor adjustment for the small path.
	if (x.small < 0) != (y.small < 0) && rem != 0 {
		quo -= 1
	}
	return MakeInt64(quo)
}
|
||||
|
||||
// Mod returns the remainder of floor division of x by y; the result
// has the sign of y (as in Python), matching Div above.
// Precondition: y is nonzero.
func (x Int) Mod(y Int) Int {
	if x.big != nil || y.big != nil {
		xb, yb := x.BigInt(), y.BigInt()

		var quo, rem big.Int
		quo.QuoRem(xb, yb, &rem)
		// QuoRem's remainder has the sign of x; shift it into y's sign
		// when the operands differ in sign and division was inexact.
		if (xb.Sign() < 0) != (yb.Sign() < 0) && rem.Sign() != 0 {
			rem.Add(&rem, yb)
		}
		return MakeBigInt(&rem)
	}
	rem := x.small % y.small
	if (x.small < 0) != (y.small < 0) && rem != 0 {
		rem += y.small
	}
	return Int{small: rem}
}
|
||||
|
||||
// rational returns the value as an exact big.Rat.
func (i Int) rational() *big.Rat {
	if i.big != nil {
		return new(big.Rat).SetInt(i.big)
	}
	return new(big.Rat).SetInt64(i.small)
}
|
||||
|
||||
// AsInt32 returns the value of x if it is representable as an int32.
func AsInt32(x Value) (int, error) {
	i, ok := x.(Int)
	if !ok {
		return 0, fmt.Errorf("got %s, want int", x.Type())
	}
	if i.big != nil {
		// The big representation is used only for values outside the
		// int32 range, so it is out of range by construction.
		return 0, fmt.Errorf("%s out of range", i)
	}
	return int(i.small), nil
}
|
||||
|
||||
// NumberToInt converts a number x to an integer value.
// An int is returned unchanged, a float is truncated towards zero.
// NumberToInt reports an error for all other values.
func NumberToInt(x Value) (Int, error) {
	switch x := x.(type) {
	case Int:
		return x, nil
	case Float:
		f := float64(x)
		// Infinities and NaN have no integer equivalent.
		if math.IsInf(f, 0) {
			return zero, fmt.Errorf("cannot convert float infinity to integer")
		} else if math.IsNaN(f) {
			return zero, fmt.Errorf("cannot convert float NaN to integer")
		}
		return finiteFloatToInt(x), nil

	}
	return zero, fmt.Errorf("cannot convert %s to int", x.Type())
}
|
||||
|
||||
// finiteFloatToInt converts f to an Int, truncating towards zero.
// f must be finite.
func finiteFloatToInt(f Float) Int {
	if math.MinInt64 <= f && f <= math.MaxInt64 {
		// small values
		return MakeInt64(int64(f))
	}
	// Very large magnitudes: go through an exact rational and divide.
	rat := f.rational()
	if rat == nil {
		panic(f) // non-finite
	}
	return MakeBigInt(new(big.Int).Div(rat.Num(), rat.Denom()))
}
|
||||
637
vendor/go.starlark.net/starlark/interp.go
generated
vendored
Normal file
637
vendor/go.starlark.net/starlark/interp.go
generated
vendored
Normal file
@@ -0,0 +1,637 @@
|
||||
package starlark
|
||||
|
||||
// This file defines the bytecode interpreter.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"go.starlark.net/internal/compile"
|
||||
"go.starlark.net/internal/spell"
|
||||
"go.starlark.net/resolve"
|
||||
"go.starlark.net/syntax"
|
||||
)
|
||||
|
||||
const vmdebug = false // TODO(adonovan): use a bitfield of specific kinds of error.
|
||||
|
||||
// TODO(adonovan):
|
||||
// - optimize position table.
|
||||
// - opt: record MaxIterStack during compilation and preallocate the stack.
|
||||
|
||||
func (fn *Function) CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) {
|
||||
if !resolve.AllowRecursion {
|
||||
// detect recursion
|
||||
for _, fr := range thread.stack[:len(thread.stack)-1] {
|
||||
// We look for the same function code,
|
||||
// not function value, otherwise the user could
|
||||
// defeat the check by writing the Y combinator.
|
||||
if frfn, ok := fr.Callable().(*Function); ok && frfn.funcode == fn.funcode {
|
||||
return nil, fmt.Errorf("function %s called recursively", fn.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
f := fn.funcode
|
||||
fr := thread.frameAt(0)
|
||||
|
||||
// Allocate space for stack and locals.
|
||||
// Logically these do not escape from this frame
|
||||
// (See https://github.com/golang/go/issues/20533.)
|
||||
//
|
||||
// This heap allocation looks expensive, but I was unable to get
|
||||
// more than 1% real time improvement in a large alloc-heavy
|
||||
// benchmark (in which this alloc was 8% of alloc-bytes)
|
||||
// by allocating space for 8 Values in each frame, or
|
||||
// by allocating stack by slicing an array held by the Thread
|
||||
// that is expanded in chunks of min(k, nspace), for k=256 or 1024.
|
||||
nlocals := len(f.Locals)
|
||||
nspace := nlocals + f.MaxStack
|
||||
space := make([]Value, nspace)
|
||||
locals := space[:nlocals:nlocals] // local variables, starting with parameters
|
||||
stack := space[nlocals:] // operand stack
|
||||
|
||||
// Digest arguments and set parameters.
|
||||
err := setArgs(locals, fn, args, kwargs)
|
||||
if err != nil {
|
||||
return nil, thread.evalError(err)
|
||||
}
|
||||
|
||||
fr.locals = locals
|
||||
|
||||
if vmdebug {
|
||||
fmt.Printf("Entering %s @ %s\n", f.Name, f.Position(0))
|
||||
fmt.Printf("%d stack, %d locals\n", len(stack), len(locals))
|
||||
defer fmt.Println("Leaving ", f.Name)
|
||||
}
|
||||
|
||||
// Spill indicated locals to cells.
|
||||
// Each cell is a separate alloc to avoid spurious liveness.
|
||||
for _, index := range f.Cells {
|
||||
locals[index] = &cell{locals[index]}
|
||||
}
|
||||
|
||||
// TODO(adonovan): add static check that beneath this point
|
||||
// - there is exactly one return statement
|
||||
// - there is no redefinition of 'err'.
|
||||
|
||||
var iterstack []Iterator // stack of active iterators
|
||||
|
||||
sp := 0
|
||||
var pc uint32
|
||||
var result Value
|
||||
code := f.Code
|
||||
loop:
|
||||
for {
|
||||
fr.pc = pc
|
||||
|
||||
op := compile.Opcode(code[pc])
|
||||
pc++
|
||||
var arg uint32
|
||||
if op >= compile.OpcodeArgMin {
|
||||
// TODO(adonovan): opt: profile this.
|
||||
// Perhaps compiling big endian would be less work to decode?
|
||||
for s := uint(0); ; s += 7 {
|
||||
b := code[pc]
|
||||
pc++
|
||||
arg |= uint32(b&0x7f) << s
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if vmdebug {
|
||||
fmt.Fprintln(os.Stderr, stack[:sp]) // very verbose!
|
||||
compile.PrintOp(f, fr.pc, op, arg)
|
||||
}
|
||||
|
||||
switch op {
|
||||
case compile.NOP:
|
||||
// nop
|
||||
|
||||
case compile.DUP:
|
||||
stack[sp] = stack[sp-1]
|
||||
sp++
|
||||
|
||||
case compile.DUP2:
|
||||
stack[sp] = stack[sp-2]
|
||||
stack[sp+1] = stack[sp-1]
|
||||
sp += 2
|
||||
|
||||
case compile.POP:
|
||||
sp--
|
||||
|
||||
case compile.EXCH:
|
||||
stack[sp-2], stack[sp-1] = stack[sp-1], stack[sp-2]
|
||||
|
||||
case compile.EQL, compile.NEQ, compile.GT, compile.LT, compile.LE, compile.GE:
|
||||
op := syntax.Token(op-compile.EQL) + syntax.EQL
|
||||
y := stack[sp-1]
|
||||
x := stack[sp-2]
|
||||
sp -= 2
|
||||
ok, err2 := Compare(op, x, y)
|
||||
if err2 != nil {
|
||||
err = err2
|
||||
break loop
|
||||
}
|
||||
stack[sp] = Bool(ok)
|
||||
sp++
|
||||
|
||||
case compile.PLUS,
|
||||
compile.MINUS,
|
||||
compile.STAR,
|
||||
compile.SLASH,
|
||||
compile.SLASHSLASH,
|
||||
compile.PERCENT,
|
||||
compile.AMP,
|
||||
compile.PIPE,
|
||||
compile.CIRCUMFLEX,
|
||||
compile.LTLT,
|
||||
compile.GTGT,
|
||||
compile.IN:
|
||||
binop := syntax.Token(op-compile.PLUS) + syntax.PLUS
|
||||
if op == compile.IN {
|
||||
binop = syntax.IN // IN token is out of order
|
||||
}
|
||||
y := stack[sp-1]
|
||||
x := stack[sp-2]
|
||||
sp -= 2
|
||||
z, err2 := Binary(binop, x, y)
|
||||
if err2 != nil {
|
||||
err = err2
|
||||
break loop
|
||||
}
|
||||
stack[sp] = z
|
||||
sp++
|
||||
|
||||
case compile.UPLUS, compile.UMINUS, compile.TILDE:
|
||||
var unop syntax.Token
|
||||
if op == compile.TILDE {
|
||||
unop = syntax.TILDE
|
||||
} else {
|
||||
unop = syntax.Token(op-compile.UPLUS) + syntax.PLUS
|
||||
}
|
||||
x := stack[sp-1]
|
||||
y, err2 := Unary(unop, x)
|
||||
if err2 != nil {
|
||||
err = err2
|
||||
break loop
|
||||
}
|
||||
stack[sp-1] = y
|
||||
|
||||
case compile.INPLACE_ADD:
|
||||
y := stack[sp-1]
|
||||
x := stack[sp-2]
|
||||
sp -= 2
|
||||
|
||||
// It's possible that y is not Iterable but
|
||||
// nonetheless defines x+y, in which case we
|
||||
// should fall back to the general case.
|
||||
var z Value
|
||||
if xlist, ok := x.(*List); ok {
|
||||
if yiter, ok := y.(Iterable); ok {
|
||||
if err = xlist.checkMutable("apply += to"); err != nil {
|
||||
break loop
|
||||
}
|
||||
listExtend(xlist, yiter)
|
||||
z = xlist
|
||||
}
|
||||
}
|
||||
if z == nil {
|
||||
z, err = Binary(syntax.PLUS, x, y)
|
||||
if err != nil {
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
stack[sp] = z
|
||||
sp++
|
||||
|
||||
case compile.NONE:
|
||||
stack[sp] = None
|
||||
sp++
|
||||
|
||||
case compile.TRUE:
|
||||
stack[sp] = True
|
||||
sp++
|
||||
|
||||
case compile.FALSE:
|
||||
stack[sp] = False
|
||||
sp++
|
||||
|
||||
case compile.MANDATORY:
|
||||
stack[sp] = mandatory{}
|
||||
sp++
|
||||
|
||||
case compile.JMP:
|
||||
pc = arg
|
||||
|
||||
case compile.CALL, compile.CALL_VAR, compile.CALL_KW, compile.CALL_VAR_KW:
|
||||
var kwargs Value
|
||||
if op == compile.CALL_KW || op == compile.CALL_VAR_KW {
|
||||
kwargs = stack[sp-1]
|
||||
sp--
|
||||
}
|
||||
|
||||
var args Value
|
||||
if op == compile.CALL_VAR || op == compile.CALL_VAR_KW {
|
||||
args = stack[sp-1]
|
||||
sp--
|
||||
}
|
||||
|
||||
// named args (pairs)
|
||||
var kvpairs []Tuple
|
||||
if nkvpairs := int(arg & 0xff); nkvpairs > 0 {
|
||||
kvpairs = make([]Tuple, 0, nkvpairs)
|
||||
kvpairsAlloc := make(Tuple, 2*nkvpairs) // allocate a single backing array
|
||||
sp -= 2 * nkvpairs
|
||||
for i := 0; i < nkvpairs; i++ {
|
||||
pair := kvpairsAlloc[:2:2]
|
||||
kvpairsAlloc = kvpairsAlloc[2:]
|
||||
pair[0] = stack[sp+2*i] // name
|
||||
pair[1] = stack[sp+2*i+1] // value
|
||||
kvpairs = append(kvpairs, pair)
|
||||
}
|
||||
}
|
||||
if kwargs != nil {
|
||||
// Add key/value items from **kwargs dictionary.
|
||||
dict, ok := kwargs.(IterableMapping)
|
||||
if !ok {
|
||||
err = fmt.Errorf("argument after ** must be a mapping, not %s", kwargs.Type())
|
||||
break loop
|
||||
}
|
||||
items := dict.Items()
|
||||
for _, item := range items {
|
||||
if _, ok := item[0].(String); !ok {
|
||||
err = fmt.Errorf("keywords must be strings, not %s", item[0].Type())
|
||||
break loop
|
||||
}
|
||||
}
|
||||
if len(kvpairs) == 0 {
|
||||
kvpairs = items
|
||||
} else {
|
||||
kvpairs = append(kvpairs, items...)
|
||||
}
|
||||
}
|
||||
|
||||
// positional args
|
||||
var positional Tuple
|
||||
if npos := int(arg >> 8); npos > 0 {
|
||||
positional = make(Tuple, npos)
|
||||
sp -= npos
|
||||
copy(positional, stack[sp:])
|
||||
}
|
||||
if args != nil {
|
||||
// Add elements from *args sequence.
|
||||
iter := Iterate(args)
|
||||
if iter == nil {
|
||||
err = fmt.Errorf("argument after * must be iterable, not %s", args.Type())
|
||||
break loop
|
||||
}
|
||||
var elem Value
|
||||
for iter.Next(&elem) {
|
||||
positional = append(positional, elem)
|
||||
}
|
||||
iter.Done()
|
||||
}
|
||||
|
||||
function := stack[sp-1]
|
||||
|
||||
if vmdebug {
|
||||
fmt.Printf("VM call %s args=%s kwargs=%s @%s\n",
|
||||
function, positional, kvpairs, f.Position(fr.pc))
|
||||
}
|
||||
|
||||
thread.endProfSpan()
|
||||
z, err2 := Call(thread, function, positional, kvpairs)
|
||||
thread.beginProfSpan()
|
||||
if err2 != nil {
|
||||
err = err2
|
||||
break loop
|
||||
}
|
||||
if vmdebug {
|
||||
fmt.Printf("Resuming %s @ %s\n", f.Name, f.Position(0))
|
||||
}
|
||||
stack[sp-1] = z
|
||||
|
||||
case compile.ITERPUSH:
|
||||
x := stack[sp-1]
|
||||
sp--
|
||||
iter := Iterate(x)
|
||||
if iter == nil {
|
||||
err = fmt.Errorf("%s value is not iterable", x.Type())
|
||||
break loop
|
||||
}
|
||||
iterstack = append(iterstack, iter)
|
||||
|
||||
case compile.ITERJMP:
|
||||
iter := iterstack[len(iterstack)-1]
|
||||
if iter.Next(&stack[sp]) {
|
||||
sp++
|
||||
} else {
|
||||
pc = arg
|
||||
}
|
||||
|
||||
case compile.ITERPOP:
|
||||
n := len(iterstack) - 1
|
||||
iterstack[n].Done()
|
||||
iterstack = iterstack[:n]
|
||||
|
||||
case compile.NOT:
|
||||
stack[sp-1] = !stack[sp-1].Truth()
|
||||
|
||||
case compile.RETURN:
|
||||
result = stack[sp-1]
|
||||
break loop
|
||||
|
||||
case compile.SETINDEX:
|
||||
z := stack[sp-1]
|
||||
y := stack[sp-2]
|
||||
x := stack[sp-3]
|
||||
sp -= 3
|
||||
err = setIndex(x, y, z)
|
||||
if err != nil {
|
||||
break loop
|
||||
}
|
||||
|
||||
case compile.INDEX:
|
||||
y := stack[sp-1]
|
||||
x := stack[sp-2]
|
||||
sp -= 2
|
||||
z, err2 := getIndex(x, y)
|
||||
if err2 != nil {
|
||||
err = err2
|
||||
break loop
|
||||
}
|
||||
stack[sp] = z
|
||||
sp++
|
||||
|
||||
case compile.ATTR:
|
||||
x := stack[sp-1]
|
||||
name := f.Prog.Names[arg]
|
||||
y, err2 := getAttr(x, name)
|
||||
if err2 != nil {
|
||||
err = err2
|
||||
break loop
|
||||
}
|
||||
stack[sp-1] = y
|
||||
|
||||
case compile.SETFIELD:
|
||||
y := stack[sp-1]
|
||||
x := stack[sp-2]
|
||||
sp -= 2
|
||||
name := f.Prog.Names[arg]
|
||||
if err2 := setField(x, name, y); err2 != nil {
|
||||
err = err2
|
||||
break loop
|
||||
}
|
||||
|
||||
case compile.MAKEDICT:
|
||||
stack[sp] = new(Dict)
|
||||
sp++
|
||||
|
||||
case compile.SETDICT, compile.SETDICTUNIQ:
|
||||
dict := stack[sp-3].(*Dict)
|
||||
k := stack[sp-2]
|
||||
v := stack[sp-1]
|
||||
sp -= 3
|
||||
oldlen := dict.Len()
|
||||
if err2 := dict.SetKey(k, v); err2 != nil {
|
||||
err = err2
|
||||
break loop
|
||||
}
|
||||
if op == compile.SETDICTUNIQ && dict.Len() == oldlen {
|
||||
err = fmt.Errorf("duplicate key: %v", k)
|
||||
break loop
|
||||
}
|
||||
|
||||
case compile.APPEND:
|
||||
elem := stack[sp-1]
|
||||
list := stack[sp-2].(*List)
|
||||
sp -= 2
|
||||
list.elems = append(list.elems, elem)
|
||||
|
||||
case compile.SLICE:
|
||||
x := stack[sp-4]
|
||||
lo := stack[sp-3]
|
||||
hi := stack[sp-2]
|
||||
step := stack[sp-1]
|
||||
sp -= 4
|
||||
res, err2 := slice(x, lo, hi, step)
|
||||
if err2 != nil {
|
||||
err = err2
|
||||
break loop
|
||||
}
|
||||
stack[sp] = res
|
||||
sp++
|
||||
|
||||
case compile.UNPACK:
|
||||
n := int(arg)
|
||||
iterable := stack[sp-1]
|
||||
sp--
|
||||
iter := Iterate(iterable)
|
||||
if iter == nil {
|
||||
err = fmt.Errorf("got %s in sequence assignment", iterable.Type())
|
||||
break loop
|
||||
}
|
||||
i := 0
|
||||
sp += n
|
||||
for i < n && iter.Next(&stack[sp-1-i]) {
|
||||
i++
|
||||
}
|
||||
var dummy Value
|
||||
if iter.Next(&dummy) {
|
||||
// NB: Len may return -1 here in obscure cases.
|
||||
err = fmt.Errorf("too many values to unpack (got %d, want %d)", Len(iterable), n)
|
||||
break loop
|
||||
}
|
||||
iter.Done()
|
||||
if i < n {
|
||||
err = fmt.Errorf("too few values to unpack (got %d, want %d)", i, n)
|
||||
break loop
|
||||
}
|
||||
|
||||
case compile.CJMP:
|
||||
if stack[sp-1].Truth() {
|
||||
pc = arg
|
||||
}
|
||||
sp--
|
||||
|
||||
case compile.CONSTANT:
|
||||
stack[sp] = fn.module.constants[arg]
|
||||
sp++
|
||||
|
||||
case compile.MAKETUPLE:
|
||||
n := int(arg)
|
||||
tuple := make(Tuple, n)
|
||||
sp -= n
|
||||
copy(tuple, stack[sp:])
|
||||
stack[sp] = tuple
|
||||
sp++
|
||||
|
||||
case compile.MAKELIST:
|
||||
n := int(arg)
|
||||
elems := make([]Value, n)
|
||||
sp -= n
|
||||
copy(elems, stack[sp:])
|
||||
stack[sp] = NewList(elems)
|
||||
sp++
|
||||
|
||||
case compile.MAKEFUNC:
|
||||
funcode := f.Prog.Functions[arg]
|
||||
tuple := stack[sp-1].(Tuple)
|
||||
n := len(tuple) - len(funcode.Freevars)
|
||||
defaults := tuple[:n:n]
|
||||
freevars := tuple[n:]
|
||||
stack[sp-1] = &Function{
|
||||
funcode: funcode,
|
||||
module: fn.module,
|
||||
defaults: defaults,
|
||||
freevars: freevars,
|
||||
}
|
||||
|
||||
case compile.LOAD:
|
||||
n := int(arg)
|
||||
module := string(stack[sp-1].(String))
|
||||
sp--
|
||||
|
||||
if thread.Load == nil {
|
||||
err = fmt.Errorf("load not implemented by this application")
|
||||
break loop
|
||||
}
|
||||
|
||||
thread.endProfSpan()
|
||||
dict, err2 := thread.Load(thread, module)
|
||||
thread.beginProfSpan()
|
||||
if err2 != nil {
|
||||
err = wrappedError{
|
||||
msg: fmt.Sprintf("cannot load %s: %v", module, err2),
|
||||
cause: err2,
|
||||
}
|
||||
break loop
|
||||
}
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
from := string(stack[sp-1-i].(String))
|
||||
v, ok := dict[from]
|
||||
if !ok {
|
||||
err = fmt.Errorf("load: name %s not found in module %s", from, module)
|
||||
if n := spell.Nearest(from, dict.Keys()); n != "" {
|
||||
err = fmt.Errorf("%s (did you mean %s?)", err, n)
|
||||
}
|
||||
break loop
|
||||
}
|
||||
stack[sp-1-i] = v
|
||||
}
|
||||
|
||||
case compile.SETLOCAL:
|
||||
locals[arg] = stack[sp-1]
|
||||
sp--
|
||||
|
||||
case compile.SETCELL:
|
||||
x := stack[sp-2]
|
||||
y := stack[sp-1]
|
||||
sp -= 2
|
||||
y.(*cell).v = x
|
||||
|
||||
case compile.SETGLOBAL:
|
||||
fn.module.globals[arg] = stack[sp-1]
|
||||
sp--
|
||||
|
||||
case compile.LOCAL:
|
||||
x := locals[arg]
|
||||
if x == nil {
|
||||
err = fmt.Errorf("local variable %s referenced before assignment", f.Locals[arg].Name)
|
||||
break loop
|
||||
}
|
||||
stack[sp] = x
|
||||
sp++
|
||||
|
||||
case compile.FREE:
|
||||
stack[sp] = fn.freevars[arg]
|
||||
sp++
|
||||
|
||||
case compile.CELL:
|
||||
x := stack[sp-1]
|
||||
stack[sp-1] = x.(*cell).v
|
||||
|
||||
case compile.GLOBAL:
|
||||
x := fn.module.globals[arg]
|
||||
if x == nil {
|
||||
err = fmt.Errorf("global variable %s referenced before assignment", f.Prog.Globals[arg].Name)
|
||||
break loop
|
||||
}
|
||||
stack[sp] = x
|
||||
sp++
|
||||
|
||||
case compile.PREDECLARED:
|
||||
name := f.Prog.Names[arg]
|
||||
x := fn.module.predeclared[name]
|
||||
if x == nil {
|
||||
err = fmt.Errorf("internal error: predeclared variable %s is uninitialized", name)
|
||||
break loop
|
||||
}
|
||||
stack[sp] = x
|
||||
sp++
|
||||
|
||||
case compile.UNIVERSAL:
|
||||
stack[sp] = Universe[f.Prog.Names[arg]]
|
||||
sp++
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("unimplemented: %s", op)
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
// ITERPOP the rest of the iterator stack.
|
||||
for _, iter := range iterstack {
|
||||
iter.Done()
|
||||
}
|
||||
|
||||
fr.locals = nil
|
||||
|
||||
return result, err
|
||||
}
|
||||
|
||||
// A wrappedError decorates an underlying cause error with a
// higher-level message while still exposing the cause via Unwrap.
type wrappedError struct {
	msg   string // formatted message returned by Error
	cause error  // underlying error, exposed via Unwrap
}
|
||||
|
||||
// Error returns the wrapper's high-level message;
// the underlying cause is available via Unwrap.
func (e wrappedError) Error() string {
	return e.msg
}
|
||||
|
||||
// Unwrap returns the underlying cause of the error.
//
// Implements the xerrors.Wrapper interface
// https://godoc.org/golang.org/x/xerrors#Wrapper
func (e wrappedError) Unwrap() error {
	return e.cause
}
|
||||
|
||||
// mandatory is a sentinel value used in a function's defaults tuple
// to indicate that a (keyword-only) parameter is mandatory.
// It implements the Value interface so that it can be stored in a Tuple.
type mandatory struct{}

func (mandatory) String() string        { return "mandatory" }
func (mandatory) Type() string          { return "mandatory" }
func (mandatory) Freeze()               {} // immutable
func (mandatory) Truth() Bool           { return False }
func (mandatory) Hash() (uint32, error) { return 0, nil }
|
||||
|
||||
// A cell is a box containing a Value.
// Local variables marked as cells hold their value indirectly
// so that they may be shared by outer and inner nested functions.
// Cells are always accessed using indirect CELL/SETCELL instructions.
// The FreeVars tuple contains only cells.
// The FREE instruction always yields a cell.
type cell struct{ v Value }

func (c *cell) String() string { return "cell" }
func (c *cell) Type() string   { return "cell" }
func (c *cell) Freeze() {
	// Freeze the boxed value, if any; a cell for a
	// not-yet-assigned local holds nil.
	if c.v != nil {
		c.v.Freeze()
	}
}

// Truth and Hash are never called: cells are an internal
// implementation detail and never escape to Starlark programs.
func (c *cell) Truth() Bool           { panic("unreachable") }
func (c *cell) Hash() (uint32, error) { panic("unreachable") }
|
||||
2104
vendor/go.starlark.net/starlark/library.go
generated
vendored
Normal file
2104
vendor/go.starlark.net/starlark/library.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
449
vendor/go.starlark.net/starlark/profile.go
generated
vendored
Normal file
449
vendor/go.starlark.net/starlark/profile.go
generated
vendored
Normal file
@@ -0,0 +1,449 @@
|
||||
// Copyright 2019 The Bazel Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package starlark
|
||||
|
||||
// This file defines a simple execution-time profiler for Starlark.
|
||||
// It measures the wall time spent executing Starlark code, and emits a
|
||||
// gzipped protocol message in pprof format (github.com/google/pprof).
|
||||
//
|
||||
// When profiling is enabled, the interpreter calls the profiler to
|
||||
// indicate the start and end of each "span" or time interval. A leaf
|
||||
// function (whether Go or Starlark) has a single span. A function that
|
||||
// calls another function has spans for each interval in which it is the
|
||||
// top of the stack. (A LOAD instruction also ends a span.)
|
||||
//
|
||||
// At the start of a span, the interpreter records the current time in
|
||||
// the thread's topmost frame. At the end of the span, it obtains the
|
||||
// time again and subtracts the span start time. The difference is added
|
||||
// to an accumulator variable in the thread. If the accumulator exceeds
|
||||
// some fixed quantum (10ms, say), the profiler records the current call
|
||||
// stack and sends it to the profiler goroutine, along with the number
|
||||
// of quanta, which are subtracted. For example, if the accumulator
|
||||
// holds 3ms and then a completed span adds 25ms to it, its value is 28ms,
|
||||
// which exceeds 10ms. The profiler records a stack with the value 20ms
|
||||
// (2 quanta), and the accumulator is left with 8ms.
|
||||
//
|
||||
// The profiler goroutine converts the stacks into the pprof format and
|
||||
// emits a gzip-compressed protocol message to the designated output
|
||||
// file. We use a hand-written streaming proto encoder to avoid
|
||||
// dependencies on pprof and proto, and to avoid the need to
|
||||
// materialize the profile data structure in memory.
|
||||
//
|
||||
// A limitation of this profiler is that it measures wall time, which
|
||||
// does not necessarily correspond to CPU time. A CPU profiler requires
|
||||
// that only running (not runnable) threads are sampled; this is
|
||||
// commonly achieved by having the kernel deliver a (PROF) signal to an
|
||||
// arbitrary running thread, through setitimer(2). The CPU profiler in the
|
||||
// Go runtime uses this mechanism, but it is not possible for a Go
|
||||
// application to register a SIGPROF handler, nor is it possible for a
|
||||
// Go handler for some other signal to read the stack pointer of
|
||||
// the interrupted thread.
|
||||
//
|
||||
// Two caveats:
|
||||
// (1) it is tempting to send the leaf Frame directly to the profiler
|
||||
// goroutine instead of making a copy of the stack, since a Frame is a
|
||||
// spaghetti stack--a linked list. However, as soon as execution
|
||||
// resumes, the stack's Frame.pc values may be mutated, so Frames are
|
||||
// not safe to share with the asynchronous profiler goroutine.
|
||||
// (2) it is tempting to use Callables as keys in a map when tabulating
|
||||
// the pprof protocol's Function entities. However, we cannot assume
|
||||
// that Callables are valid map keys, and furthermore we must not
|
||||
// pin function values in memory indefinitely as this may cause lambda
|
||||
// values to keep their free variables live much longer than necessary.
|
||||
|
||||
// TODO(adonovan):
|
||||
// - make Start/Stop fully thread-safe.
|
||||
// - fix the pc hack.
|
||||
// - experiment with other values of quantum.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"go.starlark.net/syntax"
|
||||
)
|
||||
|
||||
// StartProfile enables time profiling of all Starlark threads,
// and writes a profile in pprof format to w.
// It must be followed by a call to StopProfile to stop
// the profiler and finalize the profile.
//
// StartProfile returns an error if profiling was already enabled.
//
// StartProfile must not be called concurrently with Starlark execution.
func StartProfile(w io.Writer) error {
	// Atomically set the "on" flag; failure means a profile
	// is already in progress.
	if !atomic.CompareAndSwapUint32(&profiler.on, 0, 1) {
		return fmt.Errorf("profiler already running")
	}

	// TODO(adonovan): make the API fully concurrency-safe.
	// The main challenge is racy reads/writes of profiler.events,
	// and of send/close races on the channel it refers to.
	// It's easy to solve them with a mutex but harder to do
	// it efficiently.

	profiler.events = make(chan *profEvent, 1)
	profiler.done = make(chan error)

	// The profiler goroutine runs until StopProfile
	// closes profiler.events.
	go profile(w)

	return nil
}
|
||||
|
||||
// StopProfile stops the profiler started by a prior call to
// StartProfile and finalizes the profile. It returns an error if the
// profile could not be completed.
//
// StopProfile must not be called concurrently with Starlark execution.
func StopProfile() error {
	// Terminate the profiler goroutine and get its result.
	close(profiler.events)
	err := <-profiler.done

	profiler.done = nil
	profiler.events = nil
	atomic.StoreUint32(&profiler.on, 0)

	return err
}
|
||||
|
||||
// profiler holds the package-level profiling state shared by
// StartProfile, StopProfile, and all interpreter threads.
var profiler struct {
	on     uint32          // nonzero => profiler running
	events chan *profEvent // profile events from interpreter threads
	done   chan error      // result of the profiler goroutine, sent when it exits
}
|
||||
|
||||
func (thread *Thread) beginProfSpan() {
|
||||
if profiler.events == nil {
|
||||
return // profiling not enabled
|
||||
}
|
||||
|
||||
thread.frameAt(0).spanStart = nanotime()
|
||||
}
|
||||
|
||||
// quantum is the profiler's sampling period: each thread accumulates
// span durations and reports stacks in whole multiples of quantum.
//
// TODO(adonovan): experiment with smaller values,
// which trade space and time for greater precision.
const quantum = 10 * time.Millisecond
|
||||
|
||||
// endProfSpan ends the current span, adding its elapsed wall time to
// the thread's accumulator. Once the accumulator holds at least one
// whole quantum, a copy of the current call stack is sent to the
// profiler goroutine along with the number of complete quanta.
func (thread *Thread) endProfSpan() {
	if profiler.events == nil {
		return // profiling not enabled
	}

	// Add the span to the thread's accumulator.
	thread.proftime += time.Duration(nanotime() - thread.frameAt(0).spanStart)
	if thread.proftime < quantum {
		return
	}

	// Only record complete quanta.
	n := thread.proftime / quantum
	thread.proftime -= n * quantum

	// Copy the stack.
	// (We can't save thread.frame because its pc will change.)
	ev := &profEvent{
		thread: thread,
		time:   n * quantum,
	}
	// Reuse the event's inline array when the stack is shallow.
	ev.stack = ev.stackSpace[:0]
	for i := range thread.stack {
		fr := thread.frameAt(i)
		ev.stack = append(ev.stack, profFrame{
			pos: fr.Position(),
			fn:  fr.Callable(),
			pc:  fr.pc,
		})
	}

	profiler.events <- ev
}
|
||||
|
||||
// A profEvent records that a thread spent the given duration
// (a whole number of quanta) with the given call stack.
type profEvent struct {
	thread     *Thread // currently unused
	time       time.Duration
	stack      []profFrame
	stackSpace [8]profFrame // initial space for stack
}

// A profFrame is a snapshot of the profiling-relevant parts of a
// single stack frame, safe to hand to the profiler goroutine.
type profFrame struct {
	fn  Callable        // don't hold this live for too long (prevents GC of lambdas)
	pc  uint32          // program counter (Starlark frames only)
	pos syntax.Position // position of pc within this frame
}
|
||||
|
||||
// profile is the profiler goroutine.
// It consumes events from profiler.events and streams a
// gzip-compressed pprof protocol message to w.
// It runs until StopProfile is called, then sends its final
// error (if any) on profiler.done.
func profile(w io.Writer) {
	// Field numbers from pprof protocol.
	// See https://github.com/google/pprof/blob/master/proto/profile.proto
	const (
		Profile_sample_type    = 1  // repeated ValueType
		Profile_sample         = 2  // repeated Sample
		Profile_mapping        = 3  // repeated Mapping
		Profile_location       = 4  // repeated Location
		Profile_function       = 5  // repeated Function
		Profile_string_table   = 6  // repeated string
		Profile_time_nanos     = 9  // int64
		Profile_duration_nanos = 10 // int64
		Profile_period_type    = 11 // ValueType
		Profile_period         = 12 // int64

		ValueType_type = 1 // int64
		ValueType_unit = 2 // int64

		Sample_location_id = 1 // repeated uint64
		Sample_value       = 2 // repeated int64
		Sample_label       = 3 // repeated Label

		Label_key      = 1 // int64
		Label_str      = 2 // int64
		Label_num      = 3 // int64
		Label_num_unit = 4 // int64

		Location_id         = 1 // uint64
		Location_mapping_id = 2 // uint64
		Location_address    = 3 // uint64
		Location_line       = 4 // repeated Line

		Line_function_id = 1 // uint64
		Line_line        = 2 // int64

		Function_id          = 1 // uint64
		Function_name        = 2 // int64
		Function_system_name = 3 // int64
		Function_filename    = 4 // int64
		Function_start_line  = 5 // int64
	)

	bufw := bufio.NewWriter(w) // write file in 4KB (not 240B flate-sized) chunks
	gz := gzip.NewWriter(bufw)
	enc := protoEncoder{w: gz}

	// strings
	//
	// str interns s in the profile's string table, emitting a new
	// table entry on first use, and returns its index.
	stringIndex := make(map[string]int64)
	str := func(s string) int64 {
		i, ok := stringIndex[s]
		if !ok {
			i = int64(len(stringIndex))
			enc.string(Profile_string_table, s)
			stringIndex[s] = i
		}
		return i
	}
	str("") // entry 0

	// functions
	//
	// function returns the ID of a Callable for use in Line.FunctionId.
	// The ID is the same as the function's logical address,
	// which is supplied by the caller to avoid the need to recompute it.
	// A Function message is emitted on first use of each address.
	functionId := make(map[uintptr]uint64)
	function := func(fn Callable, addr uintptr) uint64 {
		id, ok := functionId[addr]
		if !ok {
			id = uint64(addr)

			var pos syntax.Position
			if fn, ok := fn.(callableWithPosition); ok {
				pos = fn.Position()
			}

			name := fn.Name()
			if name == "<toplevel>" {
				name = pos.Filename()
			}

			nameIndex := str(name)

			fun := new(bytes.Buffer)
			funenc := protoEncoder{w: fun}
			funenc.uint(Function_id, id)
			funenc.int(Function_name, nameIndex)
			funenc.int(Function_system_name, nameIndex)
			funenc.int(Function_filename, str(pos.Filename()))
			funenc.int(Function_start_line, int64(pos.Line))
			enc.bytes(Profile_function, fun.Bytes())

			functionId[addr] = id
		}
		return id
	}

	// locations
	//
	// location returns the ID of the location denoted by fr,
	// emitting a Location message on first use.
	// For Starlark frames, this is the Frame pc.
	locationId := make(map[uintptr]uint64)
	location := func(fr profFrame) uint64 {
		fnAddr := profFuncAddr(fr.fn)

		// For Starlark functions, the frame position
		// represents the current PC value.
		// Mix it into the low bits of the address.
		// This is super hacky and may result in collisions
		// in large functions or if functions are numerous.
		// TODO(adonovan): fix: try making this cleaner by treating
		// each bytecode segment as a Profile.Mapping.
		pcAddr := fnAddr
		if _, ok := fr.fn.(*Function); ok {
			pcAddr = (pcAddr << 16) ^ uintptr(fr.pc)
		}

		id, ok := locationId[pcAddr]
		if !ok {
			id = uint64(pcAddr)

			line := new(bytes.Buffer)
			lineenc := protoEncoder{w: line}
			lineenc.uint(Line_function_id, function(fr.fn, fnAddr))
			lineenc.int(Line_line, int64(fr.pos.Line))
			loc := new(bytes.Buffer)
			locenc := protoEncoder{w: loc}
			locenc.uint(Location_id, id)
			locenc.uint(Location_address, uint64(pcAddr))
			locenc.bytes(Location_line, line.Bytes())
			enc.bytes(Profile_location, loc.Bytes())

			locationId[pcAddr] = id
		}
		return id
	}

	// wallNanos is the profile's sole sample/period type:
	// wall time measured in nanoseconds.
	wallNanos := new(bytes.Buffer)
	wnenc := protoEncoder{w: wallNanos}
	wnenc.int(ValueType_type, str("wall"))
	wnenc.int(ValueType_unit, str("nanoseconds"))

	// informational fields of Profile
	enc.bytes(Profile_sample_type, wallNanos.Bytes())
	enc.int(Profile_period, quantum.Nanoseconds())     // magnitude of sampling period
	enc.bytes(Profile_period_type, wallNanos.Bytes())  // dimension and unit of period
	enc.int(Profile_time_nanos, time.Now().UnixNano()) // start (real) time of profile

	startNano := nanotime()

	// Read profile events from the channel
	// until it is closed by StopProfile.
	for e := range profiler.events {
		sample := new(bytes.Buffer)
		sampleenc := protoEncoder{w: sample}
		sampleenc.int(Sample_value, e.time.Nanoseconds()) // wall nanoseconds
		for _, fr := range e.stack {
			sampleenc.uint(Sample_location_id, location(fr))
		}
		enc.bytes(Profile_sample, sample.Bytes())
	}

	endNano := nanotime()
	enc.int(Profile_duration_nanos, endNano-startNano)

	err := gz.Close() // Close reports any prior write error
	if flushErr := bufw.Flush(); err == nil {
		err = flushErr
	}
	profiler.done <- err
}
|
||||
|
||||
// nanotime returns the time in nanoseconds since epoch.
// It is implemented by runtime.nanotime using the linkname hack;
// runtime.nanotime is defined for all OSs/ARCHS and uses the
// monotonic system clock, which there is no portable way to access.
// Should that function ever go away, these alternatives exist:
//
// 	// POSIX only. REALTIME not MONOTONIC. 17ns.
// 	var tv syscall.Timeval
// 	syscall.Gettimeofday(&tv) // can't fail
// 	return tv.Nano()
//
// 	// Portable. REALTIME not MONOTONIC. 46ns.
// 	return time.Now().Nanoseconds()
//
// 	// POSIX only. Adds a dependency.
// 	import "golang.org/x/sys/unix"
// 	var ts unix.Timespec
// 	unix.ClockGettime(CLOCK_MONOTONIC, &ts) // can't fail
// 	return unix.TimespecToNsec(ts)
//
//go:linkname nanotime runtime.nanotime
func nanotime() int64
|
||||
|
||||
// profFuncAddr returns the canonical "address"
|
||||
// of a Callable for use by the profiler.
|
||||
func profFuncAddr(fn Callable) uintptr {
|
||||
switch fn := fn.(type) {
|
||||
case *Builtin:
|
||||
return reflect.ValueOf(fn.fn).Pointer()
|
||||
case *Function:
|
||||
return uintptr(unsafe.Pointer(fn.funcode))
|
||||
}
|
||||
|
||||
// User-defined callable types are typically of
|
||||
// of kind pointer-to-struct. Handle them specially.
|
||||
if v := reflect.ValueOf(fn); v.Type().Kind() == reflect.Ptr {
|
||||
return v.Pointer()
|
||||
}
|
||||
|
||||
// Address zero is reserved by the protocol.
|
||||
// Use 1 for callables we don't recognize.
|
||||
log.Printf("Starlark profiler: no address for Callable %T", fn)
|
||||
return 1
|
||||
}
|
||||
|
||||
// We encode the protocol message by hand to avoid making
// the interpreter depend on both github.com/google/pprof
// and github.com/golang/protobuf.
//
// This also avoids the need to materialize a protocol message object
// tree of unbounded size and serialize it all at the end.
// The pprof format appears to have been designed to
// permit streaming implementations such as this one.
//
// See https://developers.google.com/protocol-buffers/docs/encoding.
type protoEncoder struct {
	w   io.Writer                   // *bytes.Buffer or *gzip.Writer
	tmp [binary.MaxVarintLen64]byte // scratch space for varint encoding
}

// uvarint writes x in unsigned varint encoding.
func (e *protoEncoder) uvarint(x uint64) {
	n := binary.PutUvarint(e.tmp[:], x)
	e.w.Write(e.tmp[:n])
}

// tag writes a field tag: the field number combined with its wire type.
func (e *protoEncoder) tag(field, wire uint) {
	e.uvarint(uint64(field<<3 | wire))
}

// string writes a length-delimited string field.
func (e *protoEncoder) string(field uint, s string) {
	e.tag(field, 2) // length-delimited
	e.uvarint(uint64(len(s)))
	io.WriteString(e.w, s)
}

// bytes writes a length-delimited bytes field
// (also used for embedded messages).
func (e *protoEncoder) bytes(field uint, b []byte) {
	e.tag(field, 2) // length-delimited
	e.uvarint(uint64(len(b)))
	e.w.Write(b)
}

// uint writes an unsigned integer field in varint encoding.
func (e *protoEncoder) uint(field uint, x uint64) {
	e.tag(field, 0) // varint
	e.uvarint(x)
}

// int writes a signed integer field in varint encoding
// (plain two's complement, not zigzag).
func (e *protoEncoder) int(field uint, x int64) {
	e.tag(field, 0) // varint
	e.uvarint(uint64(x))
}
|
||||
258
vendor/go.starlark.net/starlark/unpack.go
generated
vendored
Normal file
258
vendor/go.starlark.net/starlark/unpack.go
generated
vendored
Normal file
@@ -0,0 +1,258 @@
|
||||
package starlark
|
||||
|
||||
// This file defines the Unpack helper functions used by
|
||||
// built-in functions to interpret their call arguments.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// UnpackArgs unpacks the positional and keyword arguments into the
// supplied parameter variables. pairs is an alternating list of names
// and pointers to variables.
//
// If the variable is a bool, int, string, *List, *Dict, Callable,
// Iterable, or user-defined implementation of Value,
// UnpackArgs performs the appropriate type check.
// An int uses the AsInt32 check.
// If the parameter name ends with "?",
// it and all following parameters are optional.
//
// If the variable implements Value, UnpackArgs may call
// its Type() method while constructing the error message.
//
// Beware: an optional *List, *Dict, Callable, Iterable, or Value variable that is
// not assigned is not a valid Starlark Value, so the caller must
// explicitly handle such cases by interpreting nil as None or some
// computed default.
func UnpackArgs(fnname string, args Tuple, kwargs []Tuple, pairs ...interface{}) error {
	nparams := len(pairs) / 2
	var defined intset
	defined.init(nparams)

	// paramName returns the parameter name x with any
	// trailing "?" (optional marker) removed.
	paramName := func(x interface{}) string { // (no free variables)
		name := x.(string)
		if name[len(name)-1] == '?' {
			name = name[:len(name)-1]
		}
		return name
	}

	// positional arguments
	if len(args) > nparams {
		return fmt.Errorf("%s: got %d arguments, want at most %d",
			fnname, len(args), nparams)
	}
	for i, arg := range args {
		defined.set(i)
		if err := unpackOneArg(arg, pairs[2*i+1]); err != nil {
			name := paramName(pairs[2*i])
			return fmt.Errorf("%s: for parameter %s: %s", fnname, name, err)
		}
	}

	// keyword arguments
kwloop:
	for _, item := range kwargs {
		name, arg := item[0].(String), item[1]
		for i := 0; i < nparams; i++ {
			if paramName(pairs[2*i]) == string(name) {
				// found it
				if defined.set(i) {
					// set reports whether the bit was already set,
					// i.e. the parameter was already bound.
					return fmt.Errorf("%s: got multiple values for keyword argument %s",
						fnname, name)
				}
				ptr := pairs[2*i+1]
				if err := unpackOneArg(arg, ptr); err != nil {
					return fmt.Errorf("%s: for parameter %s: %s", fnname, name, err)
				}
				continue kwloop
			}
		}
		return fmt.Errorf("%s: unexpected keyword argument %s", fnname, name)
	}

	// Check that all non-optional parameters are defined.
	// (We needn't check the first len(args).)
	for i := len(args); i < nparams; i++ {
		name := pairs[2*i].(string)
		if strings.HasSuffix(name, "?") {
			break // optional; all following parameters are optional too
		}
		if !defined.get(i) {
			return fmt.Errorf("%s: missing argument for %s", fnname, name)
		}
	}

	return nil
}
|
||||
|
||||
// UnpackPositionalArgs unpacks the positional arguments into
|
||||
// corresponding variables. Each element of vars is a pointer; see
|
||||
// UnpackArgs for allowed types and conversions.
|
||||
//
|
||||
// UnpackPositionalArgs reports an error if the number of arguments is
|
||||
// less than min or greater than len(vars), if kwargs is nonempty, or if
|
||||
// any conversion fails.
|
||||
func UnpackPositionalArgs(fnname string, args Tuple, kwargs []Tuple, min int, vars ...interface{}) error {
|
||||
if len(kwargs) > 0 {
|
||||
return fmt.Errorf("%s: unexpected keyword arguments", fnname)
|
||||
}
|
||||
max := len(vars)
|
||||
if len(args) < min {
|
||||
var atleast string
|
||||
if min < max {
|
||||
atleast = "at least "
|
||||
}
|
||||
return fmt.Errorf("%s: got %d arguments, want %s%d", fnname, len(args), atleast, min)
|
||||
}
|
||||
if len(args) > max {
|
||||
var atmost string
|
||||
if max > min {
|
||||
atmost = "at most "
|
||||
}
|
||||
return fmt.Errorf("%s: got %d arguments, want %s%d", fnname, len(args), atmost, max)
|
||||
}
|
||||
for i, arg := range args {
|
||||
if err := unpackOneArg(arg, vars[i]); err != nil {
|
||||
return fmt.Errorf("%s: for parameter %d: %s", fnname, i+1, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// unpackOneArg converts the Starlark value v into the Go variable
// pointed to by ptr. It handles a fixed set of common pointer types
// directly; for any other pointer type it falls back to reflection,
// requiring *ptr's type to be a concrete implementation of Value.
// On failure it returns an error describing the type mismatch and
// leaves *ptr unmodified.
func unpackOneArg(v Value, ptr interface{}) error {
	// On failure, don't clobber *ptr.
	switch ptr := ptr.(type) {
	case *Value:
		// Any Starlark value is acceptable as-is.
		*ptr = v
	case *string:
		s, ok := AsString(v)
		if !ok {
			return fmt.Errorf("got %s, want string", v.Type())
		}
		*ptr = s
	case *bool:
		b, ok := v.(Bool)
		if !ok {
			return fmt.Errorf("got %s, want bool", v.Type())
		}
		*ptr = bool(b)
	case *int:
		// AsInt32 also reports overflow, not just type mismatch.
		i, err := AsInt32(v)
		if err != nil {
			return err
		}
		*ptr = i
	case **List:
		list, ok := v.(*List)
		if !ok {
			return fmt.Errorf("got %s, want list", v.Type())
		}
		*ptr = list
	case **Dict:
		dict, ok := v.(*Dict)
		if !ok {
			return fmt.Errorf("got %s, want dict", v.Type())
		}
		*ptr = dict
	case *Callable:
		f, ok := v.(Callable)
		if !ok {
			return fmt.Errorf("got %s, want callable", v.Type())
		}
		*ptr = f
	case *Iterable:
		it, ok := v.(Iterable)
		if !ok {
			return fmt.Errorf("got %s, want iterable", v.Type())
		}
		*ptr = it
	default:
		// v must have type *V, where V is some subtype of starlark.Value.
		ptrv := reflect.ValueOf(ptr)
		if ptrv.Kind() != reflect.Ptr {
			// A non-pointer here is a bug in the Go caller, not a
			// Starlark dynamic error, so panic rather than return.
			log.Panicf("internal error: not a pointer: %T", ptr)
		}
		paramVar := ptrv.Elem()
		if !reflect.TypeOf(v).AssignableTo(paramVar.Type()) {
			// The value is not assignable to the variable.

			// Detect a possible bug in the Go program that called Unpack:
			// If the variable *ptr is not a subtype of Value,
			// no value of v can possibly work.
			if !paramVar.Type().AssignableTo(reflect.TypeOf(new(Value)).Elem()) {
				log.Panicf("pointer element type does not implement Value: %T", ptr)
			}

			// Report Starlark dynamic type error.
			//
			// We prefer the Starlark Value.Type name over
			// its Go reflect.Type name, but calling the
			// Value.Type method on the variable is not safe
			// in general. If the variable is an interface,
			// the call will fail. Even if the variable has
			// a concrete type, it might not be safe to call
			// Type() on a zero instance. Thus we must use
			// recover.

			// Default to Go reflect.Type name
			paramType := paramVar.Type().String()

			// Attempt to call Value.Type method.
			func() {
				// Swallow any panic from calling Type() on a zero
				// value; we keep the reflect name as the fallback.
				defer func() { recover() }()
				paramType = paramVar.MethodByName("Type").Call(nil)[0].String()
			}()
			return fmt.Errorf("got %s, want %s", v.Type(), paramType)
		}
		paramVar.Set(reflect.ValueOf(v))
	}
	return nil
}
|
||||
|
||||
// intset is a set of small non-negative integers. It stores members
// in a single 64-bit bitmask when the universe is small, and switches
// to a map representation for larger universes.
type intset struct {
	small uint64       // bitset, used if n < 64
	large map[int]bool // set, used if n >= 64
}

// init prepares the set to hold integers in [0, n).
func (is *intset) init(n int) {
	if n >= 64 {
		is.large = make(map[int]bool)
	}
}

// set inserts i and reports whether it was already present.
func (is *intset) set(i int) (prev bool) {
	if is.large != nil {
		prev = is.large[i]
		is.large[i] = true
		return prev
	}
	mask := uint64(1) << uint(i)
	prev = is.small&mask != 0
	is.small |= mask
	return prev
}

// get reports whether i is a member of the set.
func (is *intset) get(i int) bool {
	if is.large != nil {
		return is.large[i]
	}
	return is.small&(1<<uint(i)) != 0
}

// len returns the number of members.
func (is *intset) len() int {
	if is.large != nil {
		return len(is.large)
	}
	// Suboptimal popcount, but used only for error reporting.
	n := 0
	for bits := is.small; bits != 0; bits >>= 1 {
		if bits&1 != 0 {
			n++
		}
	}
	return n
}
|
||||
1293
vendor/go.starlark.net/starlark/value.go
generated
vendored
Normal file
1293
vendor/go.starlark.net/starlark/value.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user