update dependencies (#6519)

Signed-off-by: hongming <coder.scala@gmail.com>
This commit is contained in:
hongming
2025-05-29 15:51:37 +08:00
committed by GitHub
parent 281b2091a5
commit 4d88ed2ca1
2573 changed files with 171946 additions and 52199 deletions

@@ -48,6 +48,7 @@ type ConsumeFuzzer struct {
NumberOfCalls int
position uint32
fuzzUnexportedFields bool
forceUTF8Strings bool
curDepth int
Funcs map[reflect.Type]reflect.Value
}
@@ -104,6 +105,14 @@ func (f *ConsumeFuzzer) DisallowUnexportedFields() {
f.fuzzUnexportedFields = false
}
func (f *ConsumeFuzzer) AllowNonUTF8Strings() {
f.forceUTF8Strings = false
}
func (f *ConsumeFuzzer) DisallowNonUTF8Strings() {
f.forceUTF8Strings = true
}
func (f *ConsumeFuzzer) GenerateStruct(targetStruct interface{}) error {
e := reflect.ValueOf(targetStruct).Elem()
return f.fuzzStruct(e, false)
@@ -224,6 +233,14 @@ func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error
if e.CanSet() {
e.Set(uu)
}
case reflect.Uint:
newInt, err := f.GetUint()
if err != nil {
return err
}
if e.CanSet() {
e.SetUint(uint64(newInt))
}
case reflect.Uint16:
newInt, err := f.GetUint16()
if err != nil {
@@ -309,6 +326,14 @@ func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error
if e.CanSet() {
e.SetUint(uint64(b))
}
case reflect.Bool:
b, err := f.GetBool()
if err != nil {
return err
}
if e.CanSet() {
e.SetBool(b)
}
}
return nil
}
@@ -410,6 +435,23 @@ func (f *ConsumeFuzzer) GetUint64() (uint64, error) {
return binary.BigEndian.Uint64(u64), nil
}
func (f *ConsumeFuzzer) GetUint() (uint, error) {
var zero uint
size := int(unsafe.Sizeof(zero))
if size == 8 {
u64, err := f.GetUint64()
if err != nil {
return 0, err
}
return uint(u64), nil
}
u32, err := f.GetUint32()
if err != nil {
return 0, err
}
return uint(u32), nil
}
func (f *ConsumeFuzzer) GetBytes() ([]byte, error) {
var length uint32
var err error
@@ -461,7 +503,11 @@ func (f *ConsumeFuzzer) GetString() (string, error) {
return "nil", errors.New("numbers overflow")
}
f.position = byteBegin + length
return string(f.data[byteBegin:f.position]), nil
s := string(f.data[byteBegin:f.position])
if f.forceUTF8Strings {
s = strings.ToValidUTF8(s, "")
}
return s, nil
}
func (f *ConsumeFuzzer) GetBool() (bool, error) {

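The new `forceUTF8Strings` flag makes `GetString` pass its output through `strings.ToValidUTF8`. A minimal usage sketch, assuming go-fuzz-headers' `NewConsumer` constructor and a 4-byte big-endian length prefix (which is what `GetUint32` consumes):

```go
package main

import (
	"fmt"
	"unicode/utf8"

	fuzz "github.com/AdaLogics/go-fuzz-headers"
)

func main() {
	// A 4-byte big-endian length prefix (5), then five bytes, one of
	// which (0xff) is not valid UTF-8.
	data := []byte{0, 0, 0, 5, 'a', 0xff, 'b', 'c', 'd'}

	f := fuzz.NewConsumer(data)
	f.DisallowNonUTF8Strings() // sets forceUTF8Strings = true

	s, err := f.GetString()
	if err != nil {
		panic(err)
	}
	// The invalid byte has been stripped by strings.ToValidUTF8.
	fmt.Println(utf8.ValidString(s), s) // true abcd
}
```
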
@@ -11,21 +11,13 @@ func (oscState oscStringState) Handle(b byte) (s state, e error) {
return nextState, err
}
switch {
case isOscStringTerminator(b):
// There are several control characters and sequences which can
// terminate an OSC string. Most of them are handled by the baseState
// handler. The ANSI_BEL character is a special case which behaves as a
// terminator only for an OSC string.
if b == ANSI_BEL {
return oscState.parser.ground, nil
}
return oscState, nil
}
// See below for OSC string terminators for linux
// http://man7.org/linux/man-pages/man4/console_codes.4.html
func isOscStringTerminator(b byte) bool {
if b == ANSI_BEL || b == 0x5C {
return true
}
return false
}

vendor/github.com/Azure/go-ntlmssp/.travis.yml generated vendored Normal file

@@ -0,0 +1,17 @@
sudo: false
language: go
before_script:
- go get -u golang.org/x/lint/golint
go:
- 1.10.x
- master
script:
- test -z "$(gofmt -s -l . | tee /dev/stderr)"
- test -z "$(golint ./... | tee /dev/stderr)"
- go vet ./...
- go build -v ./...
- go test -v ./...

vendor/github.com/Azure/go-ntlmssp/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Microsoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/Azure/go-ntlmssp/README.md generated vendored Normal file

@@ -0,0 +1,29 @@
# go-ntlmssp
Golang package that provides NTLM/Negotiate authentication over HTTP
[![GoDoc](https://godoc.org/github.com/Azure/go-ntlmssp?status.svg)](https://godoc.org/github.com/Azure/go-ntlmssp) [![Build Status](https://travis-ci.org/Azure/go-ntlmssp.svg?branch=dev)](https://travis-ci.org/Azure/go-ntlmssp)
Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx
Implementation hints from http://davenport.sourceforge.net/ntlm.html
This package only implements authentication, no key exchange or encryption. It
only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding.
This package implements NTLMv2.
# Usage
```go
url, user, password := "http://www.example.com/secrets", "robpike", "pw123"
client := &http.Client{
Transport: ntlmssp.Negotiator{
RoundTripper:&http.Transport{},
},
}
req, _ := http.NewRequest("GET", url, nil)
req.SetBasicAuth(user, password)
res, _ := client.Do(req)
```
-----
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

vendor/github.com/Azure/go-ntlmssp/SECURITY.md generated vendored Normal file

@@ -0,0 +1,41 @@
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.8 BLOCK -->
## Security
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
## Reporting Security Issues
**Please do not report security vulnerabilities through public GitHub issues.**
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue
This information will help us triage your report more quickly.
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
## Preferred Languages
We prefer all communications to be in English.
## Policy
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
<!-- END MICROSOFT SECURITY.MD BLOCK -->

@@ -0,0 +1,187 @@
package ntlmssp
import (
"bytes"
"crypto/rand"
"encoding/binary"
"encoding/hex"
"errors"
"strings"
"time"
)
type authenicateMessage struct {
LmChallengeResponse []byte
NtChallengeResponse []byte
TargetName string
UserName string
// only set if negotiateFlag_NTLMSSP_NEGOTIATE_KEY_EXCH
EncryptedRandomSessionKey []byte
NegotiateFlags negotiateFlags
MIC []byte
}
type authenticateMessageFields struct {
messageHeader
LmChallengeResponse varField
NtChallengeResponse varField
TargetName varField
UserName varField
Workstation varField
_ [8]byte
NegotiateFlags negotiateFlags
}
func (m authenicateMessage) MarshalBinary() ([]byte, error) {
if !m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE) {
return nil, errors.New("Only unicode is supported")
}
target, user := toUnicode(m.TargetName), toUnicode(m.UserName)
workstation := toUnicode("")
ptr := binary.Size(&authenticateMessageFields{})
f := authenticateMessageFields{
messageHeader: newMessageHeader(3),
NegotiateFlags: m.NegotiateFlags,
LmChallengeResponse: newVarField(&ptr, len(m.LmChallengeResponse)),
NtChallengeResponse: newVarField(&ptr, len(m.NtChallengeResponse)),
TargetName: newVarField(&ptr, len(target)),
UserName: newVarField(&ptr, len(user)),
Workstation: newVarField(&ptr, len(workstation)),
}
f.NegotiateFlags.Unset(negotiateFlagNTLMSSPNEGOTIATEVERSION)
b := bytes.Buffer{}
if err := binary.Write(&b, binary.LittleEndian, &f); err != nil {
return nil, err
}
if err := binary.Write(&b, binary.LittleEndian, &m.LmChallengeResponse); err != nil {
return nil, err
}
if err := binary.Write(&b, binary.LittleEndian, &m.NtChallengeResponse); err != nil {
return nil, err
}
if err := binary.Write(&b, binary.LittleEndian, &target); err != nil {
return nil, err
}
if err := binary.Write(&b, binary.LittleEndian, &user); err != nil {
return nil, err
}
if err := binary.Write(&b, binary.LittleEndian, &workstation); err != nil {
return nil, err
}
return b.Bytes(), nil
}
//ProcessChallenge crafts an AUTHENTICATE message in response to the CHALLENGE message
//that was received from the server
func ProcessChallenge(challengeMessageData []byte, user, password string, domainNeeded bool) ([]byte, error) {
if user == "" && password == "" {
return nil, errors.New("Anonymous authentication not supported")
}
var cm challengeMessage
if err := cm.UnmarshalBinary(challengeMessageData); err != nil {
return nil, err
}
if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) {
return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)")
}
if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) {
return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)")
}
if !domainNeeded {
cm.TargetName = ""
}
am := authenicateMessage{
UserName: user,
TargetName: cm.TargetName,
NegotiateFlags: cm.NegotiateFlags,
}
timestamp := cm.TargetInfo[avIDMsvAvTimestamp]
if timestamp == nil { // no time sent, take current time
ft := uint64(time.Now().UnixNano()) / 100
ft += 116444736000000000 // add time between unix & windows offset
timestamp = make([]byte, 8)
binary.LittleEndian.PutUint64(timestamp, ft)
}
clientChallenge := make([]byte, 8)
rand.Reader.Read(clientChallenge)
ntlmV2Hash := getNtlmV2Hash(password, user, cm.TargetName)
am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash,
cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw)
if cm.TargetInfoRaw == nil {
am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash,
cm.ServerChallenge[:], clientChallenge)
}
return am.MarshalBinary()
}
func ProcessChallengeWithHash(challengeMessageData []byte, user, hash string) ([]byte, error) {
if user == "" && hash == "" {
return nil, errors.New("Anonymous authentication not supported")
}
var cm challengeMessage
if err := cm.UnmarshalBinary(challengeMessageData); err != nil {
return nil, err
}
if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) {
return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)")
}
if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) {
return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)")
}
am := authenicateMessage{
UserName: user,
TargetName: cm.TargetName,
NegotiateFlags: cm.NegotiateFlags,
}
timestamp := cm.TargetInfo[avIDMsvAvTimestamp]
if timestamp == nil { // no time sent, take current time
ft := uint64(time.Now().UnixNano()) / 100
ft += 116444736000000000 // add time between unix & windows offset
timestamp = make([]byte, 8)
binary.LittleEndian.PutUint64(timestamp, ft)
}
clientChallenge := make([]byte, 8)
rand.Reader.Read(clientChallenge)
hashParts := strings.Split(hash, ":")
if len(hashParts) > 1 {
hash = hashParts[1]
}
hashBytes, err := hex.DecodeString(hash)
if err != nil {
return nil, err
}
ntlmV2Hash := hmacMd5(hashBytes, toUnicode(strings.ToUpper(user)+cm.TargetName))
am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash,
cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw)
if cm.TargetInfoRaw == nil {
am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash,
cm.ServerChallenge[:], clientChallenge)
}
return am.MarshalBinary()
}

vendor/github.com/Azure/go-ntlmssp/authheader.go generated vendored Normal file

@@ -0,0 +1,66 @@
package ntlmssp
import (
"encoding/base64"
"strings"
)
type authheader []string
func (h authheader) IsBasic() bool {
for _, s := range h {
if strings.HasPrefix(string(s), "Basic ") {
return true
}
}
return false
}
func (h authheader) Basic() string {
for _, s := range h {
if strings.HasPrefix(string(s), "Basic ") {
return s
}
}
return ""
}
func (h authheader) IsNegotiate() bool {
for _, s := range h {
if strings.HasPrefix(string(s), "Negotiate") {
return true
}
}
return false
}
func (h authheader) IsNTLM() bool {
for _, s := range h {
if strings.HasPrefix(string(s), "NTLM") {
return true
}
}
return false
}
func (h authheader) GetData() ([]byte, error) {
for _, s := range h {
if strings.HasPrefix(string(s), "NTLM") || strings.HasPrefix(string(s), "Negotiate") || strings.HasPrefix(string(s), "Basic ") {
p := strings.Split(string(s), " ")
if len(p) < 2 {
return nil, nil
}
return base64.StdEncoding.DecodeString(string(p[1]))
}
}
return nil, nil
}
func (h authheader) GetBasicCreds() (username, password string, err error) {
d, err := h.GetData()
if err != nil {
return "", "", err
}
parts := strings.SplitN(string(d), ":", 2)
return parts[0], parts[1], nil
}

vendor/github.com/Azure/go-ntlmssp/avids.go generated vendored Normal file

@@ -0,0 +1,17 @@
package ntlmssp
type avID uint16
const (
avIDMsvAvEOL avID = iota
avIDMsvAvNbComputerName
avIDMsvAvNbDomainName
avIDMsvAvDNSComputerName
avIDMsvAvDNSDomainName
avIDMsvAvDNSTreeName
avIDMsvAvFlags
avIDMsvAvTimestamp
avIDMsvAvSingleHost
avIDMsvAvTargetName
avIDMsvChannelBindings
)

@@ -0,0 +1,82 @@
package ntlmssp
import (
"bytes"
"encoding/binary"
"fmt"
)
type challengeMessageFields struct {
messageHeader
TargetName varField
NegotiateFlags negotiateFlags
ServerChallenge [8]byte
_ [8]byte
TargetInfo varField
}
func (m challengeMessageFields) IsValid() bool {
return m.messageHeader.IsValid() && m.MessageType == 2
}
type challengeMessage struct {
challengeMessageFields
TargetName string
TargetInfo map[avID][]byte
TargetInfoRaw []byte
}
func (m *challengeMessage) UnmarshalBinary(data []byte) error {
r := bytes.NewReader(data)
err := binary.Read(r, binary.LittleEndian, &m.challengeMessageFields)
if err != nil {
return err
}
if !m.challengeMessageFields.IsValid() {
return fmt.Errorf("Message is not a valid challenge message: %+v", m.challengeMessageFields.messageHeader)
}
if m.challengeMessageFields.TargetName.Len > 0 {
m.TargetName, err = m.challengeMessageFields.TargetName.ReadStringFrom(data, m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE))
if err != nil {
return err
}
}
if m.challengeMessageFields.TargetInfo.Len > 0 {
d, err := m.challengeMessageFields.TargetInfo.ReadFrom(data)
m.TargetInfoRaw = d
if err != nil {
return err
}
m.TargetInfo = make(map[avID][]byte)
r := bytes.NewReader(d)
for {
var id avID
var l uint16
err = binary.Read(r, binary.LittleEndian, &id)
if err != nil {
return err
}
if id == avIDMsvAvEOL {
break
}
err = binary.Read(r, binary.LittleEndian, &l)
if err != nil {
return err
}
value := make([]byte, l)
n, err := r.Read(value)
if err != nil {
return err
}
if n != int(l) {
return fmt.Errorf("Expected to read %d bytes, got only %d", l, n)
}
m.TargetInfo[id] = value
}
}
return nil
}

vendor/github.com/Azure/go-ntlmssp/messageheader.go generated vendored Normal file

@@ -0,0 +1,21 @@
package ntlmssp
import (
"bytes"
)
var signature = [8]byte{'N', 'T', 'L', 'M', 'S', 'S', 'P', 0}
type messageHeader struct {
Signature [8]byte
MessageType uint32
}
func (h messageHeader) IsValid() bool {
return bytes.Equal(h.Signature[:], signature[:]) &&
h.MessageType > 0 && h.MessageType < 4
}
func newMessageHeader(messageType uint32) messageHeader {
return messageHeader{signature, messageType}
}

vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go generated vendored Normal file

@@ -0,0 +1,52 @@
package ntlmssp
type negotiateFlags uint32
const (
/*A*/ negotiateFlagNTLMSSPNEGOTIATEUNICODE negotiateFlags = 1 << 0
/*B*/ negotiateFlagNTLMNEGOTIATEOEM = 1 << 1
/*C*/ negotiateFlagNTLMSSPREQUESTTARGET = 1 << 2
/*D*/
negotiateFlagNTLMSSPNEGOTIATESIGN = 1 << 4
/*E*/ negotiateFlagNTLMSSPNEGOTIATESEAL = 1 << 5
/*F*/ negotiateFlagNTLMSSPNEGOTIATEDATAGRAM = 1 << 6
/*G*/ negotiateFlagNTLMSSPNEGOTIATELMKEY = 1 << 7
/*H*/
negotiateFlagNTLMSSPNEGOTIATENTLM = 1 << 9
/*J*/
negotiateFlagANONYMOUS = 1 << 11
/*K*/ negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED = 1 << 12
/*L*/ negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED = 1 << 13
/*M*/
negotiateFlagNTLMSSPNEGOTIATEALWAYSSIGN = 1 << 15
/*N*/ negotiateFlagNTLMSSPTARGETTYPEDOMAIN = 1 << 16
/*O*/ negotiateFlagNTLMSSPTARGETTYPESERVER = 1 << 17
/*P*/
negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY = 1 << 19
/*Q*/ negotiateFlagNTLMSSPNEGOTIATEIDENTIFY = 1 << 20
/*R*/
negotiateFlagNTLMSSPREQUESTNONNTSESSIONKEY = 1 << 22
/*S*/ negotiateFlagNTLMSSPNEGOTIATETARGETINFO = 1 << 23
/*T*/
negotiateFlagNTLMSSPNEGOTIATEVERSION = 1 << 25
/*U*/
negotiateFlagNTLMSSPNEGOTIATE128 = 1 << 29
/*V*/ negotiateFlagNTLMSSPNEGOTIATEKEYEXCH = 1 << 30
/*W*/ negotiateFlagNTLMSSPNEGOTIATE56 = 1 << 31
)
func (field negotiateFlags) Has(flags negotiateFlags) bool {
return field&flags == flags
}
func (field *negotiateFlags) Unset(flags negotiateFlags) {
*field = *field ^ (*field & flags)
}

@@ -0,0 +1,64 @@
package ntlmssp
import (
"bytes"
"encoding/binary"
"errors"
"strings"
)
const expMsgBodyLen = 40
type negotiateMessageFields struct {
messageHeader
NegotiateFlags negotiateFlags
Domain varField
Workstation varField
Version
}
var defaultFlags = negotiateFlagNTLMSSPNEGOTIATETARGETINFO |
negotiateFlagNTLMSSPNEGOTIATE56 |
negotiateFlagNTLMSSPNEGOTIATE128 |
negotiateFlagNTLMSSPNEGOTIATEUNICODE |
negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY
//NewNegotiateMessage creates a new NEGOTIATE message with the
//flags that this package supports.
func NewNegotiateMessage(domainName, workstationName string) ([]byte, error) {
payloadOffset := expMsgBodyLen
flags := defaultFlags
if domainName != "" {
flags |= negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED
}
if workstationName != "" {
flags |= negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED
}
msg := negotiateMessageFields{
messageHeader: newMessageHeader(1),
NegotiateFlags: flags,
Domain: newVarField(&payloadOffset, len(domainName)),
Workstation: newVarField(&payloadOffset, len(workstationName)),
Version: DefaultVersion(),
}
b := bytes.Buffer{}
if err := binary.Write(&b, binary.LittleEndian, &msg); err != nil {
return nil, err
}
if b.Len() != expMsgBodyLen {
return nil, errors.New("incorrect body length")
}
payload := strings.ToUpper(domainName + workstationName)
if _, err := b.WriteString(payload); err != nil {
return nil, err
}
return b.Bytes(), nil
}

vendor/github.com/Azure/go-ntlmssp/negotiator.go generated vendored Normal file

@@ -0,0 +1,151 @@
package ntlmssp
import (
"bytes"
"encoding/base64"
"io"
"io/ioutil"
"net/http"
"strings"
)
// GetDomain parses the domain name out of the input, based on backslashes,
// and also handles the UPN (user@domain) form.
func GetDomain(user string) (string, string, bool) {
domain := ""
domainNeeded := false
if strings.Contains(user, "\\") {
ucomponents := strings.SplitN(user, "\\", 2)
domain = ucomponents[0]
user = ucomponents[1]
domainNeeded = true
} else if strings.Contains(user, "@") {
domainNeeded = false
} else {
domainNeeded = true
}
return user, domain, domainNeeded
}
// Negotiator is an http.RoundTripper decorator that automatically
// converts basic authentication to NTLM/Negotiate authentication when appropriate.
type Negotiator struct{ http.RoundTripper }
//RoundTrip sends the request to the server, handling any authentication
//re-sends as needed.
func (l Negotiator) RoundTrip(req *http.Request) (res *http.Response, err error) {
// Use default round tripper if not provided
rt := l.RoundTripper
if rt == nil {
rt = http.DefaultTransport
}
// If it is not basic auth, just round trip the request as usual
reqauth := authheader(req.Header.Values("Authorization"))
if !reqauth.IsBasic() {
return rt.RoundTrip(req)
}
reqauthBasic := reqauth.Basic()
// Save request body
body := bytes.Buffer{}
if req.Body != nil {
_, err = body.ReadFrom(req.Body)
if err != nil {
return nil, err
}
req.Body.Close()
req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
}
// first try anonymous, in case the server still finds us
// authenticated from previous traffic
req.Header.Del("Authorization")
res, err = rt.RoundTrip(req)
if err != nil {
return nil, err
}
if res.StatusCode != http.StatusUnauthorized {
return res, err
}
resauth := authheader(res.Header.Values("Www-Authenticate"))
if !resauth.IsNegotiate() && !resauth.IsNTLM() {
// Unauthorized, Negotiate not requested, let's try with basic auth
req.Header.Set("Authorization", string(reqauthBasic))
io.Copy(ioutil.Discard, res.Body)
res.Body.Close()
req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
res, err = rt.RoundTrip(req)
if err != nil {
return nil, err
}
if res.StatusCode != http.StatusUnauthorized {
return res, err
}
resauth = authheader(res.Header.Values("Www-Authenticate"))
}
if resauth.IsNegotiate() || resauth.IsNTLM() {
// 401 with request:Basic and response:Negotiate
io.Copy(ioutil.Discard, res.Body)
res.Body.Close()
// recycle credentials
u, p, err := reqauth.GetBasicCreds()
if err != nil {
return nil, err
}
// get domain from username
domain := ""
u, domain, domainNeeded := GetDomain(u)
// send negotiate
negotiateMessage, err := NewNegotiateMessage(domain, "")
if err != nil {
return nil, err
}
if resauth.IsNTLM() {
req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(negotiateMessage))
} else {
req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(negotiateMessage))
}
req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
res, err = rt.RoundTrip(req)
if err != nil {
return nil, err
}
// receive challenge?
resauth = authheader(res.Header.Values("Www-Authenticate"))
challengeMessage, err := resauth.GetData()
if err != nil {
return nil, err
}
if !(resauth.IsNegotiate() || resauth.IsNTLM()) || len(challengeMessage) == 0 {
// Negotiation failed, let client deal with response
return res, nil
}
io.Copy(ioutil.Discard, res.Body)
res.Body.Close()
// send authenticate
authenticateMessage, err := ProcessChallenge(challengeMessage, u, p, domainNeeded)
if err != nil {
return nil, err
}
if resauth.IsNTLM() {
req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(authenticateMessage))
} else {
req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(authenticateMessage))
}
req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
return rt.RoundTrip(req)
}
return res, err
}

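The exported `GetDomain` helper above recognizes both the `DOMAIN\user` and UPN (`user@domain`) forms. A small sketch of the three cases, with outputs derived from the logic shown:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-ntlmssp"
)

func main() {
	// DOMAIN\user form: the domain is split off and marked as needed.
	u, d, needed := ntlmssp.GetDomain(`CONTOSO\alice`)
	fmt.Println(u, d, needed) // alice CONTOSO true

	// UPN form: the domain travels inside the username, so none is needed.
	u, d, needed = ntlmssp.GetDomain("alice@contoso.com")
	fmt.Println(u, d, needed) // alice@contoso.com  false

	// Bare username: no domain available, but one is still needed.
	u, d, needed = ntlmssp.GetDomain("alice")
	fmt.Println(u, d, needed) // alice  true
}
```
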
vendor/github.com/Azure/go-ntlmssp/nlmp.go generated vendored Normal file

@@ -0,0 +1,51 @@
// Package ntlmssp provides NTLM/Negotiate authentication over HTTP
//
// Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx,
// implementation hints from http://davenport.sourceforge.net/ntlm.html .
// This package only implements authentication, no key exchange or encryption. It
// only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding.
// This package implements NTLMv2.
package ntlmssp
import (
"crypto/hmac"
"crypto/md5"
"golang.org/x/crypto/md4"
"strings"
)
func getNtlmV2Hash(password, username, target string) []byte {
return hmacMd5(getNtlmHash(password), toUnicode(strings.ToUpper(username)+target))
}
func getNtlmHash(password string) []byte {
hash := md4.New()
hash.Write(toUnicode(password))
return hash.Sum(nil)
}
func computeNtlmV2Response(ntlmV2Hash, serverChallenge, clientChallenge,
timestamp, targetInfo []byte) []byte {
temp := []byte{1, 1, 0, 0, 0, 0, 0, 0}
temp = append(temp, timestamp...)
temp = append(temp, clientChallenge...)
temp = append(temp, 0, 0, 0, 0)
temp = append(temp, targetInfo...)
temp = append(temp, 0, 0, 0, 0)
NTProofStr := hmacMd5(ntlmV2Hash, serverChallenge, temp)
return append(NTProofStr, temp...)
}
func computeLmV2Response(ntlmV2Hash, serverChallenge, clientChallenge []byte) []byte {
return append(hmacMd5(ntlmV2Hash, serverChallenge, clientChallenge), clientChallenge...)
}
func hmacMd5(key []byte, data ...[]byte) []byte {
mac := hmac.New(md5.New, key)
for _, d := range data {
mac.Write(d)
}
return mac.Sum(nil)
}

vendor/github.com/Azure/go-ntlmssp/unicode.go generated vendored Normal file

@@ -0,0 +1,29 @@
package ntlmssp
import (
"bytes"
"encoding/binary"
"errors"
"unicode/utf16"
)
// helper func's for dealing with Windows Unicode (UTF16LE)
func fromUnicode(d []byte) (string, error) {
if len(d)%2 > 0 {
return "", errors.New("Unicode (UTF 16 LE) specified, but uneven data length")
}
s := make([]uint16, len(d)/2)
err := binary.Read(bytes.NewReader(d), binary.LittleEndian, &s)
if err != nil {
return "", err
}
return string(utf16.Decode(s)), nil
}
func toUnicode(s string) []byte {
uints := utf16.Encode([]rune(s))
b := bytes.Buffer{}
binary.Write(&b, binary.LittleEndian, &uints)
return b.Bytes()
}

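Both helpers are unexported; the following self-contained sketch mirrors the same UTF16LE round trip for illustration:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"unicode/utf16"
)

// toUnicode mirrors the vendored helper: UTF16LE-encode a string.
func toUnicode(s string) []byte {
	u := utf16.Encode([]rune(s))
	b := bytes.Buffer{}
	binary.Write(&b, binary.LittleEndian, &u) // error ignored for brevity
	return b.Bytes()
}

// fromUnicode mirrors the inverse direction (error handling elided).
func fromUnicode(d []byte) string {
	s := make([]uint16, len(d)/2)
	binary.Read(bytes.NewReader(d), binary.LittleEndian, &s)
	return string(utf16.Decode(s))
}

func main() {
	enc := toUnicode("NTLM")
	fmt.Printf("% x\n", enc)      // 4e 00 54 00 4c 00 4d 00
	fmt.Println(fromUnicode(enc)) // NTLM
}
```
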
vendor/github.com/Azure/go-ntlmssp/varfield.go generated vendored Normal file

@@ -0,0 +1,40 @@
package ntlmssp
import (
"errors"
)
type varField struct {
Len uint16
MaxLen uint16
BufferOffset uint32
}
func (f varField) ReadFrom(buffer []byte) ([]byte, error) {
if len(buffer) < int(f.BufferOffset+uint32(f.Len)) {
return nil, errors.New("Error reading data, varField extends beyond buffer")
}
return buffer[f.BufferOffset : f.BufferOffset+uint32(f.Len)], nil
}
func (f varField) ReadStringFrom(buffer []byte, unicode bool) (string, error) {
d, err := f.ReadFrom(buffer)
if err != nil {
return "", err
}
if unicode { // UTF-16LE encoding scheme
return fromUnicode(d)
}
// OEM encoding, close enough to ASCII, since no code page is specified
return string(d), err
}
func newVarField(ptr *int, fieldsize int) varField {
f := varField{
Len: uint16(fieldsize),
MaxLen: uint16(fieldsize),
BufferOffset: uint32(*ptr),
}
*ptr += fieldsize
return f
}

vendor/github.com/Azure/go-ntlmssp/version.go generated vendored Normal file

@@ -0,0 +1,20 @@
package ntlmssp
// Version is a struct representing https://msdn.microsoft.com/en-us/library/cc236654.aspx
type Version struct {
ProductMajorVersion uint8
ProductMinorVersion uint8
ProductBuild uint16
_ [3]byte
NTLMRevisionCurrent uint8
}
// DefaultVersion returns a Version with "sensible" defaults (Windows 7)
func DefaultVersion() Version {
return Version{
ProductMajorVersion: 6,
ProductMinorVersion: 1,
ProductBuild: 7601,
NTLMRevisionCurrent: 15,
}
}

@@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages.
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
Documentation: https://godocs.io/github.com/BurntSushi/toml
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show

@@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
return md.unify(primValue.undecoded, rvalue(v))
}
// markDecodedRecursive is a helper to mark any key under the given tmap as
// decoded, recursing as needed
func markDecodedRecursive(md *MetaData, tmap map[string]any) {
for key := range tmap {
md.decoded[md.context.add(key).String()] = struct{}{}
if tmap, ok := tmap[key].(map[string]any); ok {
md.context = append(md.context, key)
markDecodedRecursive(md, tmap)
md.context = md.context[0 : len(md.context)-1]
}
}
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
@@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error {
if err != nil {
return md.parseErr(err)
}
// Assume the Unmarshaler decoded everything, so mark all keys under
// this table as decoded.
if tmap, ok := data.(map[string]any); ok {
markDecodedRecursive(md, tmap)
}
if aot, ok := data.([]map[string]any); ok {
for _, tmap := range aot {
markDecodedRecursive(md, tmap)
}
}
return nil
}
if v, ok := rvi.(encoding.TextUnmarshaler); ok {

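The effect of `markDecodedRecursive` is observable through `MetaData.Undecoded`: keys consumed by a custom `Unmarshaler` no longer surface as undecoded. A hedged sketch (the `rawTable` type is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// rawTable is a hypothetical toml.Unmarshaler that swallows a whole table.
type rawTable struct{ m map[string]any }

func (r *rawTable) UnmarshalTOML(v any) error {
	r.m, _ = v.(map[string]any)
	return nil
}

func main() {
	var cfg struct {
		Server rawTable
	}
	doc := `
[server]
host = "localhost"

[server.tls]
cert = "a.pem"
`
	md, err := toml.Decode(doc, &cfg)
	if err != nil {
		panic(err)
	}
	// With the recursive marking, everything under [server], including the
	// nested [server.tls] table, now counts as decoded.
	fmt.Println(md.Undecoded()) // []
}
```
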
@@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
var mapKeysDirect, mapKeysSub []reflect.Value
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
mapKeysSub = append(mapKeysSub, k)
mapKeysSub = append(mapKeysSub, mapKey)
} else {
mapKeysDirect = append(mapKeysDirect, k)
mapKeysDirect = append(mapKeysDirect, mapKey)
}
}
var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys)
writeMapKeys := func(mapKeys []reflect.Value, trailC bool) {
sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() })
for i, mapKey := range mapKeys {
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
val := eindirect(rv.MapIndex(mapKey))
if isNil(val) {
continue
}
if inline {
enc.writeKeyValue(Key{mapKey}, val, true)
enc.writeKeyValue(Key{mapKey.String()}, val, true)
if trailC || i != len(mapKeys)-1 {
enc.wf(", ")
}
} else {
enc.encode(key.add(mapKey), val)
enc.encode(key.add(mapKey.String()), val)
}
}
}
@@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
}
}
const is32Bit = (32 << (^uint(0) >> 63)) == 32
func pointerTo(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
return pointerTo(t.Elem())
@@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
frv := eindirect(rv.Field(i))
if is32Bit {
// Copy so it works correct on 32bit archs; not clear why this
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
// This also works fine on 64bit, but 32bit archs are somewhat
// rare and this is a wee bit faster.
copyStart := make([]int, len(start))
copy(copyStart, start)
start = copyStart
}
// Need to make a copy because ... ehm, I don't know why... I guess
// allocating a new array can cause it to fail(?)
//
// Done for: https://github.com/BurntSushi/toml/issues/430
// Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314
copyStart := make([]int, len(start))
copy(copyStart, start)
start = copyStart
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
@@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
}
addFields(rt, rv, nil)
writeFields := func(fields [][]int) {
writeFields := func(fields [][]int, totalFields int) {
for _, fieldIndex := range fields {
fieldType := rt.FieldByIndex(fieldIndex)
fieldVal := rv.FieldByIndex(fieldIndex)
@@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
if fieldIndex[0] != len(fields)-1 {
if fieldIndex[0] != totalFields-1 {
enc.wf(", ")
}
} else {
@@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.wf("{")
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
l := len(fieldsDirect) + len(fieldsSub)
writeFields(fieldsDirect, l)
writeFields(fieldsSub, l)
if inline {
enc.wf("}")
}

@@ -69,7 +69,7 @@ type Position struct {
Line int // Line number, starting at 1.
Col int // Error column, starting at 1.
Start int // Start of error, as byte offset starting at 0.
Len int // Lenght of the error in bytes.
Len int // Length of the error in bytes.
}
func (p Position) withCol(tomlFile string) Position {

@@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
func (lx *lexer) errorf(format string, values ...any) stateFn {
if lx.atEOF {
pos := lx.getPos()
pos.Line--
if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' {
pos.Line--
}
pos.Len = 1
pos.Start = lx.pos - 1
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
@@ -1117,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
case 'x':
r = lx.peek()
if !isHex(r) {
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
}
return lexHexInteger
}
@@ -1265,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' }
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
func isBareKeyChar(r rune, tomlNext bool) bool {
if tomlNext {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' || r == '-' ||
r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
(r >= 0x037f && r <= 0x1fff) ||
(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
(r >= 0x10000 && r <= 0xeffff)
}
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' || r == '-'
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') || r == '_' || r == '-'
}

@@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string {
// Like append(), but only increase the cap by 1.
func (k Key) add(piece string) Key {
if cap(k) > len(k) {
return append(k, piece)
}
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece

@@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) {
// it anyway.
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
data = data[2:]
//lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
data = data[3:]
}
@@ -529,7 +528,7 @@ func numUnderscoresOK(s string) bool {
}
}
// isHexis a superset of all the permissable characters surrounding an
// isHex is a superset of all the permissible characters surrounding an
// underscore.
accept = isHex(r)
}

@@ -39,9 +39,11 @@ var (
)
// semVerRegex is the regular expression used to parse a semantic version.
const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
// This is not the official regex from the semver spec. It has been modified to allow for loose handling
// where versions like 2.1 are detected.
const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
`(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
`(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
// Version represents a single semantic version.
type Version struct {
@@ -146,8 +148,8 @@ func NewVersion(v string) (*Version, error) {
}
sv := &Version{
metadata: m[8],
pre: m[5],
metadata: m[5],
pre: m[4],
original: v,
}
@@ -158,7 +160,7 @@ func NewVersion(v string) (*Version, error) {
}
if m[2] != "" {
sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
sv.minor, err = strconv.ParseUint(m[2], 10, 64)
if err != nil {
return nil, fmt.Errorf("Error parsing version segment: %s", err)
}
@@ -167,7 +169,7 @@ func NewVersion(v string) (*Version, error) {
}
if m[3] != "" {
sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
sv.patch, err = strconv.ParseUint(m[3], 10, 64)
if err != nil {
return nil, fmt.Errorf("Error parsing version segment: %s", err)
}
@@ -612,7 +614,9 @@ func containsOnly(s string, comp string) bool {
func validatePrerelease(p string) error {
eparts := strings.Split(p, ".")
for _, p := range eparts {
if containsOnly(p, num) {
if p == "" {
return ErrInvalidMetadata
} else if containsOnly(p, num) {
if len(p) > 1 && p[0] == '0' {
return ErrSegmentStartsZero
}
@@ -631,7 +635,9 @@ func validatePrerelease(p string) error {
func validateMetadata(m string) error {
eparts := strings.Split(m, ".")
for _, p := range eparts {
if !containsOnly(p, allowed) {
if p == "" {
return ErrInvalidMetadata
} else if !containsOnly(p, allowed) {
return ErrInvalidMetadata
}
}

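The reworked regex captures version parts without their dot prefixes (hence the dropped `strings.TrimPrefix` calls) and tolerates partial versions such as `2.1`, while the validators now reject empty dot-separated segments. A quick sketch of both behaviors, assuming the v3 module path:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// Loose handling: "2.1" parses, with the patch segment defaulting to 0.
	v, err := semver.NewVersion("2.1")
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Major(), v.Minor(), v.Patch()) // 2 1 0

	// An empty prerelease segment is invalid and fails to parse.
	if _, err := semver.NewVersion("1.2.3-alpha..1"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```
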
@@ -49,16 +49,16 @@ func ShiftNBytesLeft(dst, x []byte, n int) {
dst = append(dst, make([]byte, n/8)...)
}
// XorBytesMut assumes equal input length, replaces X with X XOR Y
// XorBytesMut replaces X with X XOR Y. len(X) must be >= len(Y).
func XorBytesMut(X, Y []byte) {
for i := 0; i < len(X); i++ {
for i := 0; i < len(Y); i++ {
X[i] ^= Y[i]
}
}
// XorBytes assumes equal input length, puts X XOR Y into Z
// XorBytes puts X XOR Y into Z. len(Z) and len(X) must be >= len(Y).
func XorBytes(Z, X, Y []byte) {
for i := 0; i < len(X); i++ {
for i := 0; i < len(Y); i++ {
Z[i] = X[i] ^ Y[i]
}
}

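A self-contained sketch of the relaxed length contract (the loop is now bounded by `len(Y)`, so `X` may be longer than `Y`):

```go
package main

import "fmt"

// xorBytesMut mirrors the vendored XorBytesMut: X ^= Y, with len(X) >= len(Y).
func xorBytesMut(X, Y []byte) {
	for i := 0; i < len(Y); i++ {
		X[i] ^= Y[i]
	}
}

func main() {
	x := []byte{0xff, 0xff, 0xff, 0xff}
	y := []byte{0x0f, 0xf0} // shorter than x: only the first two bytes change
	xorBytesMut(x, y)
	fmt.Printf("% x\n", x) // f0 0f ff ff
}
```
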
@@ -109,8 +109,10 @@ func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte {
if len(nonce) > o.nonceSize {
panic("crypto/ocb: Incorrect nonce length given to OCB")
}
ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize)
o.crypt(enc, out, nonce, adata, plaintext)
sep := len(plaintext)
ret, out := byteutil.SliceForAppend(dst, sep+o.tagSize)
tag := o.crypt(enc, out[:sep], nonce, adata, plaintext)
copy(out[sep:], tag)
return ret
}
@@ -122,12 +124,10 @@ func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
return nil, ocbError("Ciphertext shorter than tag length")
}
sep := len(ciphertext) - o.tagSize
ret, out := byteutil.SliceForAppend(dst, len(ciphertext))
ret, out := byteutil.SliceForAppend(dst, sep)
ciphertextData := ciphertext[:sep]
tag := ciphertext[sep:]
o.crypt(dec, out, nonce, adata, ciphertextData)
if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 {
ret = ret[:sep]
tag := o.crypt(dec, out, nonce, adata, ciphertextData)
if subtle.ConstantTimeCompare(tag, ciphertext[sep:]) == 1 {
return ret, nil
}
for i := range out {
@@ -137,7 +137,8 @@ func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
}
// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt)
// function. It returns the resulting plain/ciphertext with the tag appended.
// function. It writes the resulting plain/ciphertext into Y and returns
// the tag.
func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte {
//
// Consider X as a sequence of 128-bit blocks
@@ -194,13 +195,14 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte {
byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))])
blockX := X[i*blockSize : (i+1)*blockSize]
blockY := Y[i*blockSize : (i+1)*blockSize]
byteutil.XorBytes(blockY, blockX, offset)
switch instruction {
case enc:
byteutil.XorBytesMut(checksum, blockX)
byteutil.XorBytes(blockY, blockX, offset)
o.block.Encrypt(blockY, blockY)
byteutil.XorBytesMut(blockY, offset)
byteutil.XorBytesMut(checksum, blockX)
case dec:
byteutil.XorBytes(blockY, blockX, offset)
o.block.Decrypt(blockY, blockY)
byteutil.XorBytesMut(blockY, offset)
byteutil.XorBytesMut(checksum, blockY)
@@ -216,31 +218,24 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte {
o.block.Encrypt(pad, offset)
chunkX := X[blockSize*m:]
chunkY := Y[blockSize*m : len(X)]
byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)])
// P_* || bit(1) || zeroes(127) - len(P_*)
switch instruction {
case enc:
paddedY := append(chunkX, byte(128))
paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...)
byteutil.XorBytesMut(checksum, paddedY)
byteutil.XorBytesMut(checksum, chunkX)
checksum[len(chunkX)] ^= 128
byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)])
// P_* || bit(1) || zeroes(127) - len(P_*)
case dec:
paddedX := append(chunkY, byte(128))
paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...)
byteutil.XorBytesMut(checksum, paddedX)
byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)])
// P_* || bit(1) || zeroes(127) - len(P_*)
byteutil.XorBytesMut(checksum, chunkY)
checksum[len(chunkY)] ^= 128
}
byteutil.XorBytes(tag, checksum, offset)
byteutil.XorBytesMut(tag, o.mask.lDol)
o.block.Encrypt(tag, tag)
byteutil.XorBytesMut(tag, o.hash(adata))
copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize])
} else {
byteutil.XorBytes(tag, checksum, offset)
byteutil.XorBytesMut(tag, o.mask.lDol)
o.block.Encrypt(tag, tag)
byteutil.XorBytesMut(tag, o.hash(adata))
copy(Y[blockSize*m:], tag[:o.tagSize])
}
return Y
byteutil.XorBytes(tag, checksum, offset)
byteutil.XorBytesMut(tag, o.mask.lDol)
o.block.Encrypt(tag, tag)
byteutil.XorBytesMut(tag, o.hash(adata))
return tag[:o.tagSize]
}
// This hash function is used to compute the tag. Per design, on empty input it

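After this refactor, `crypt` returns the tag rather than appending it to `Y`; `Seal` copies it after the ciphertext and `Open` compares it in constant time. A usage sketch against the public `ocb` package, assuming its `NewOCB` constructor:

```go
package main

import (
	"crypto/aes"
	"fmt"

	"github.com/ProtonMail/go-crypto/ocb"
)

func main() {
	block, _ := aes.NewCipher(make([]byte, 16)) // all-zero demo key
	aead, err := ocb.NewOCB(block)
	if err != nil {
		panic(err)
	}

	nonce := make([]byte, aead.NonceSize())
	pt := []byte("attack at dawn")

	// Seal writes ciphertext followed by the tag (Overhead() bytes).
	ct := aead.Seal(nil, nonce, pt, nil)
	fmt.Println(len(ct) == len(pt)+aead.Overhead()) // true

	// Open recomputes the tag and compares it in constant time.
	out, err := aead.Open(nil, nonce, ct, nil)
	fmt.Println(string(out), err) // attack at dawn <nil>
}
```
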
@@ -7,6 +7,7 @@ package armor
import (
"encoding/base64"
"io"
"sort"
)
var armorHeaderSep = []byte(": ")
@@ -159,8 +160,15 @@ func encode(out io.Writer, blockType string, headers map[string]string, checksum
return
}
for k, v := range headers {
err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
keys := make([]string, len(headers))
i := 0
for k := range headers {
keys[i] = k
i++
}
sort.Strings(keys)
for _, k := range keys {
err = writeSlices(out, []byte(k), armorHeaderSep, []byte(headers[k]), newline)
if err != nil {
return
}

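Sorting the keys makes armored output deterministic for a given header map. A usage sketch with the package's `Encode`:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer
	headers := map[string]string{
		"Version": "demo",
		"Comment": "sorted before Version",
	}
	w, err := armor.Encode(&buf, "PGP MESSAGE", headers)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello"))
	w.Close()
	// Header lines now always appear in sorted key order:
	//   Comment: sorted before Version
	//   Version: demo
	fmt.Println(buf.String())
}
```
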
@@ -6,6 +6,7 @@
package errors // import "github.com/ProtonMail/go-crypto/openpgp/errors"
import (
"fmt"
"strconv"
)
@@ -178,3 +179,32 @@ type ErrMalformedMessage string
func (dke ErrMalformedMessage) Error() string {
return "openpgp: malformed message " + string(dke)
}
type messageTooLargeError int
func (e messageTooLargeError) Error() string {
return "openpgp: decompressed message size exceeds provided limit"
}
// ErrMessageTooLarge is returned if the read data from
// a compressed packet exceeds the provided limit.
var ErrMessageTooLarge error = messageTooLargeError(0)
// ErrEncryptionKeySelection is returned if encryption key selection fails (v2 API).
type ErrEncryptionKeySelection struct {
PrimaryKeyId string
PrimaryKeyErr error
EncSelectionKeyId *string
EncSelectionErr error
}
func (eks ErrEncryptionKeySelection) Error() string {
prefix := fmt.Sprintf("openpgp: key selection for primary key %s:", eks.PrimaryKeyId)
if eks.PrimaryKeyErr != nil {
return fmt.Sprintf("%s invalid primary key: %s", prefix, eks.PrimaryKeyErr)
}
if eks.EncSelectionKeyId != nil {
return fmt.Sprintf("%s invalid encryption key %s: %s", prefix, *eks.EncSelectionKeyId, eks.EncSelectionErr)
}
return fmt.Sprintf("%s no encryption key: %s", prefix, eks.EncSelectionErr)
}

@@ -37,7 +37,7 @@ func (conf *AEADConfig) Mode() AEADMode {
// ChunkSizeByte returns the byte indicating the chunk size. The effective
// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6)
// limit to 16 = 4 MiB
// limit chunkSizeByte to 16, i.e. a chunk size of 2^22 bytes = 4 MiB
// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
func (conf *AEADConfig) ChunkSizeByte() byte {
if conf == nil || conf.ChunkSize == 0 {
@@ -49,8 +49,8 @@ func (conf *AEADConfig) ChunkSizeByte() byte {
switch {
case exponent < 6:
exponent = 6
case exponent > 16:
exponent = 16
case exponent > 22:
exponent = 22
}
return byte(exponent - 6)

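A quick check of the chunk-size formula and the new cap (`chunkSizeByte` 16 decodes to 2^22 bytes, i.e. 4 MiB):

```go
package main

import "fmt"

// decode mirrors the decodeAEADChunkSize formula used by the packet code.
func decode(chunkSizeByte byte) uint64 {
	return uint64(1) << (chunkSizeByte + 6)
}

func main() {
	fmt.Println(decode(0))  // 64: the minimum chunk size
	fmt.Println(decode(16)) // 4194304: the new 4 MiB cap (2^22)
}
```
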
@@ -3,7 +3,6 @@
package packet
import (
"bytes"
"crypto/cipher"
"encoding/binary"
"io"
@@ -15,12 +14,11 @@ import (
type aeadCrypter struct {
aead cipher.AEAD
chunkSize int
initialNonce []byte
nonce []byte
associatedData []byte // Chunk-independent associated data
chunkIndex []byte // Chunk counter
packetTag packetType // SEIP packet (v2) or AEAD Encrypted Data packet
bytesProcessed int // Amount of plaintext bytes encrypted/decrypted
buffer bytes.Buffer // Buffered bytes across chunks
}
// computeNonce takes the incremental index and computes an eXclusive OR with
@@ -28,12 +26,12 @@ type aeadCrypter struct {
// 5.16.1 and 5.16.2). It returns the resulting nonce.
func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected {
return append(wo.initialNonce, wo.chunkIndex...)
return wo.nonce
}
nonce = make([]byte, len(wo.initialNonce))
copy(nonce, wo.initialNonce)
offset := len(wo.initialNonce) - 8
nonce = make([]byte, len(wo.nonce))
copy(nonce, wo.nonce)
offset := len(wo.nonce) - 8
for i := 0; i < 8; i++ {
nonce[i+offset] ^= wo.chunkIndex[i]
}
@@ -62,8 +60,9 @@ func (wo *aeadCrypter) incrementIndex() error {
type aeadDecrypter struct {
aeadCrypter // Embedded ciphertext opener
reader io.Reader // 'reader' is a partialLengthReader
chunkBytes []byte
peekedBytes []byte // Used to detect last chunk
eof bool
buffer []byte // Buffered decrypted bytes
}
// Read decrypts bytes and reads them into dst. It decrypts when necessary and
@@ -71,59 +70,44 @@ type aeadDecrypter struct {
// and an error.
func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) {
// Return buffered plaintext bytes from previous calls
if ar.buffer.Len() > 0 {
return ar.buffer.Read(dst)
}
// Return EOF if we've previously validated the final tag
if ar.eof {
return 0, io.EOF
if len(ar.buffer) > 0 {
n = copy(dst, ar.buffer)
ar.buffer = ar.buffer[n:]
return
}
// Read a chunk
tagLen := ar.aead.Overhead()
cipherChunkBuf := new(bytes.Buffer)
_, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen))
cipherChunk := cipherChunkBuf.Bytes()
if errRead != nil && errRead != io.EOF {
copy(ar.chunkBytes, ar.peekedBytes) // Copy bytes peeked in previous chunk or in initialization
bytesRead, errRead := io.ReadFull(ar.reader, ar.chunkBytes[tagLen:])
if errRead != nil && errRead != io.EOF && errRead != io.ErrUnexpectedEOF {
return 0, errRead
}
if len(cipherChunk) > 0 {
decrypted, errChunk := ar.openChunk(cipherChunk)
if bytesRead > 0 {
ar.peekedBytes = ar.chunkBytes[bytesRead:bytesRead+tagLen]
decrypted, errChunk := ar.openChunk(ar.chunkBytes[:bytesRead])
if errChunk != nil {
return 0, errChunk
}
// Return decrypted bytes, buffering if necessary
if len(dst) < len(decrypted) {
n = copy(dst, decrypted[:len(dst)])
ar.buffer.Write(decrypted[len(dst):])
} else {
n = copy(dst, decrypted)
}
n = copy(dst, decrypted)
ar.buffer = decrypted[n:]
return
}
// Check final authentication tag
if errRead == io.EOF {
errChunk := ar.validateFinalTag(ar.peekedBytes)
if errChunk != nil {
return n, errChunk
}
ar.eof = true // Mark EOF for when we've returned all buffered data
}
return
return 0, io.EOF
}
// Close is noOp. The final authentication tag of the stream was already
// checked in the last Read call. In the future, this function could be used to
// wipe the reader and peeked, decrypted bytes, if necessary.
// Close checks the final authentication tag of the stream.
// In the future, this function could also be used to wipe the reader
// and peeked & decrypted bytes, if necessary.
func (ar *aeadDecrypter) Close() (err error) {
if !ar.eof {
errChunk := ar.validateFinalTag(ar.peekedBytes)
if errChunk != nil {
return errChunk
}
errChunk := ar.validateFinalTag(ar.peekedBytes)
if errChunk != nil {
return errChunk
}
return nil
}
@@ -132,20 +116,13 @@ func (ar *aeadDecrypter) Close() (err error) {
// the underlying plaintext and an error. It accesses peeked bytes from next
// chunk, to identify the last chunk and decrypt/validate accordingly.
func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) {
tagLen := ar.aead.Overhead()
// Restore carried bytes from last call
chunkExtra := append(ar.peekedBytes, data...)
// 'chunk' contains encrypted bytes, followed by an authentication tag.
chunk := chunkExtra[:len(chunkExtra)-tagLen]
ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:]
adata := ar.associatedData
if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted {
adata = append(ar.associatedData, ar.chunkIndex...)
}
nonce := ar.computeNextNonce()
plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata)
plainChunk, err := ar.aead.Open(data[:0:len(data)], nonce, data, adata)
if err != nil {
return nil, errors.ErrAEADTagVerification
}
@@ -183,27 +160,29 @@ func (ar *aeadDecrypter) validateFinalTag(tag []byte) error {
type aeadEncrypter struct {
aeadCrypter // Embedded plaintext sealer
writer io.WriteCloser // 'writer' is a partialLengthWriter
chunkBytes []byte
offset int
}
// Write encrypts and writes bytes. It encrypts when necessary and buffers extra
// plaintext bytes for next call. When the stream is finished, Close() MUST be
// called to append the final tag.
func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) {
// Append plaintextBytes to existing buffered bytes
n, err = aw.buffer.Write(plaintextBytes)
if err != nil {
return n, err
}
// Encrypt and write chunks
for aw.buffer.Len() >= aw.chunkSize {
plainChunk := aw.buffer.Next(aw.chunkSize)
encryptedChunk, err := aw.sealChunk(plainChunk)
if err != nil {
return n, err
}
_, err = aw.writer.Write(encryptedChunk)
if err != nil {
return n, err
for n != len(plaintextBytes) {
copied := copy(aw.chunkBytes[aw.offset:aw.chunkSize], plaintextBytes[n:])
n += copied
aw.offset += copied
if aw.offset == aw.chunkSize {
encryptedChunk, err := aw.sealChunk(aw.chunkBytes[:aw.offset])
if err != nil {
return n, err
}
_, err = aw.writer.Write(encryptedChunk)
if err != nil {
return n, err
}
aw.offset = 0
}
}
return
@@ -215,9 +194,8 @@ func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) {
func (aw *aeadEncrypter) Close() (err error) {
// Encrypt and write a chunk if there's buffered data left, or if we haven't
// written any chunks yet.
if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 {
plainChunk := aw.buffer.Bytes()
lastEncryptedChunk, err := aw.sealChunk(plainChunk)
if aw.offset > 0 || aw.bytesProcessed == 0 {
lastEncryptedChunk, err := aw.sealChunk(aw.chunkBytes[:aw.offset])
if err != nil {
return err
}
@@ -263,7 +241,7 @@ func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) {
}
nonce := aw.computeNextNonce()
encrypted := aw.aead.Seal(nil, nonce, data, adata)
encrypted := aw.aead.Seal(data[:0], nonce, data, adata)
aw.bytesProcessed += len(data)
if err := aw.aeadCrypter.incrementIndex(); err != nil {
return nil, err

@@ -65,24 +65,28 @@ func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) {
blockCipher := ae.cipher.new(key)
aead := ae.mode.new(blockCipher)
// Carry the first tagLen bytes
chunkSize := decodeAEADChunkSize(ae.chunkSizeByte)
tagLen := ae.mode.TagLength()
peekedBytes := make([]byte, tagLen)
chunkBytes := make([]byte, chunkSize+tagLen*2)
peekedBytes := chunkBytes[chunkSize+tagLen:]
n, err := io.ReadFull(ae.Contents, peekedBytes)
if n < tagLen || (err != nil && err != io.EOF) {
return nil, errors.AEADError("Not enough data to decrypt:" + err.Error())
}
chunkSize := decodeAEADChunkSize(ae.chunkSizeByte)
return &aeadDecrypter{
aeadCrypter: aeadCrypter{
aead: aead,
chunkSize: chunkSize,
initialNonce: ae.initialNonce,
nonce: ae.initialNonce,
associatedData: ae.associatedData(),
chunkIndex: make([]byte, 8),
packetTag: packetTypeAEADEncrypted,
},
reader: ae.Contents,
peekedBytes: peekedBytes}, nil
chunkBytes: chunkBytes,
peekedBytes: peekedBytes,
}, nil
}
// associatedData for chunks: tag, version, cipher, mode, chunk size byte

@@ -98,6 +98,16 @@ func (c *Compressed) parse(r io.Reader) error {
return err
}
// LimitedBodyReader wraps the provided body reader with a limiter that restricts
// the number of bytes read to the specified limit.
// If limit is nil, the reader is unbounded.
func (c *Compressed) LimitedBodyReader(limit *int64) io.Reader {
if limit == nil {
return c.Body
}
return &LimitReader{R: c.Body, N: *limit}
}
// compressedWriterCloser represents the serialized compression stream
// header and the compressor. Its Close() method ensures that both the
// compressor and serialized stream header are closed. Its Write()
@@ -159,3 +169,24 @@ func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *Compression
return
}
// LimitReader is an io.Reader that fails with MessageToLarge if read bytes exceed N.
type LimitReader struct {
R io.Reader // underlying reader
N int64 // max bytes allowed
}
func (l *LimitReader) Read(p []byte) (int, error) {
if l.N <= 0 {
return 0, errors.ErrMessageTooLarge
}
n, err := l.R.Read(p)
l.N -= int64(n)
if err == nil && l.N <= 0 {
err = errors.ErrMessageTooLarge
}
return n, err
}

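A sketch of the new `LimitReader` in isolation: it does not truncate the stream mid-buffer, but flags the overrun with `ErrMessageTooLarge` once more than `N` bytes have been consumed:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/ProtonMail/go-crypto/openpgp/errors"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	r := &packet.LimitReader{R: strings.NewReader("0123456789"), N: 4}
	buf, err := io.ReadAll(r)
	// The limiter reports the overrun via the error once more than
	// N bytes have been read from the underlying reader.
	fmt.Println(len(buf), err == errors.ErrMessageTooLarge) // 10 true
}
```
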
@@ -173,6 +173,16 @@ type Config struct {
// weaknesses in the hash algo, potentially hindering e.g. some chosen-prefix attacks.
// The default behavior, when the config or flag is nil, is to enable the feature.
NonDeterministicSignaturesViaNotation *bool
// InsecureAllowAllKeyFlagsWhenMissing determines how a key without valid key flags is handled.
// When set to true, a key without flags is treated as if all flags are enabled.
// This behavior is consistent with GPG.
InsecureAllowAllKeyFlagsWhenMissing bool
// MaxDecompressedMessageSize specifies the maximum number of bytes that can be
// read from a compressed packet. This serves as an upper limit to prevent
// excessively large decompressed messages.
MaxDecompressedMessageSize *int64
}
func (c *Config) Random() io.Reader {
@@ -403,6 +413,20 @@ func (c *Config) RandomizeSignaturesViaNotation() bool {
return *c.NonDeterministicSignaturesViaNotation
}
func (c *Config) AllowAllKeyFlagsWhenMissing() bool {
if c == nil {
return false
}
return c.InsecureAllowAllKeyFlagsWhenMissing
}
func (c *Config) DecompressedMessageSizeLimit() *int64 {
if c == nil {
return nil
}
return c.MaxDecompressedMessageSize
}
// BoolPointer is a helper function to set a boolean pointer in the Config.
// e.g., config.CheckPacketSequence = BoolPointer(true)
func BoolPointer(value bool) *bool {

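A hedged sketch wiring the new limit through `packet.Config` (the keyring and message reader are placeholders supplied by the caller):

```go
package pgpdemo

import (
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// readWithLimit decrypts msg but refuses to decompress more than 1 MiB.
func readWithLimit(msg io.Reader, keyring openpgp.KeyRing) (*openpgp.MessageDetails, error) {
	limit := int64(1 << 20) // 1 MiB
	config := &packet.Config{
		MaxDecompressedMessageSize: &limit,
	}
	// If a compressed packet inflates past the limit, reading the message
	// body fails with errors.ErrMessageTooLarge (see LimitReader above).
	return openpgp.ReadMessage(msg, keyring, nil, config)
}
```
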
@@ -1048,12 +1048,17 @@ func (pk *PublicKey) VerifyDirectKeySignature(sig *Signature) (err error) {
// KeyIdString returns the public key's key ID in capital hex
// (e.g. "6C7EE1B8621CC013").
func (pk *PublicKey) KeyIdString() string {
return fmt.Sprintf("%X", pk.Fingerprint[12:20])
return fmt.Sprintf("%016X", pk.KeyId)
}
// KeyIdShortString returns the short form of public key's fingerprint
// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
// This function will return the full key id for v5 and v6 keys
// since the short key id is undefined for them.
func (pk *PublicKey) KeyIdShortString() string {
if pk.Version >= 5 {
return pk.KeyIdString()
}
return fmt.Sprintf("%X", pk.Fingerprint[16:20])
}

@@ -1288,7 +1288,9 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
if sig.IssuerKeyId != nil && sig.Version == 4 {
keyId := make([]byte, 8)
binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId})
// Note: making this critical breaks RPM <=4.16.
// See: https://github.com/ProtonMail/go-crypto/issues/263
subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
}
// Notation Data
for _, notation := range sig.Notations {

View File

@@ -70,8 +70,10 @@ func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e
aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData())
// Carry the first tagLen bytes
chunkSize := decodeAEADChunkSize(se.ChunkSizeByte)
tagLen := se.Mode.TagLength()
peekedBytes := make([]byte, tagLen)
chunkBytes := make([]byte, chunkSize+tagLen*2)
peekedBytes := chunkBytes[chunkSize+tagLen:]
n, err := io.ReadFull(se.Contents, peekedBytes)
if n < tagLen || (err != nil && err != io.EOF) {
return nil, errors.StructuralError("not enough data to decrypt:" + err.Error())
@@ -81,12 +83,13 @@ func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e
aeadCrypter: aeadCrypter{
aead: aead,
chunkSize: decodeAEADChunkSize(se.ChunkSizeByte),
initialNonce: nonce,
nonce: nonce,
associatedData: se.associatedData(),
chunkIndex: make([]byte, 8),
chunkIndex: nonce[len(nonce)-8:],
packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected,
},
reader: se.Contents,
chunkBytes: chunkBytes,
peekedBytes: peekedBytes,
}, nil
}
@@ -130,16 +133,20 @@ func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite
aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix)
chunkSize := decodeAEADChunkSize(chunkSizeByte)
tagLen := aead.Overhead()
chunkBytes := make([]byte, chunkSize+tagLen)
return &aeadEncrypter{
aeadCrypter: aeadCrypter{
aead: aead,
chunkSize: decodeAEADChunkSize(chunkSizeByte),
chunkSize: chunkSize,
associatedData: prefix,
chunkIndex: make([]byte, 8),
initialNonce: nonce,
nonce: nonce,
chunkIndex: nonce[len(nonce)-8:],
packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected,
},
writer: ciphertext,
writer: ciphertext,
chunkBytes: chunkBytes,
}, nil
}
@@ -149,10 +156,10 @@ func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inpu
encryptionKey := make([]byte, c.KeySize())
_, _ = readFull(hkdfReader, encryptionKey)
// Last 64 bits of nonce are the counter
nonce = make([]byte, mode.IvLength()-8)
nonce = make([]byte, mode.IvLength())
_, _ = readFull(hkdfReader, nonce)
// Last 64 bits of nonce are the counter
_, _ = readFull(hkdfReader, nonce[:len(nonce)-8])
blockCipher := c.new(encryptionKey)
aead = mode.new(blockCipher)
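The nonce is now drawn at its full IV length, with only the prefix filled from the HKDF stream; the trailing 8 bytes form the chunk counter that chunkIndex aliases above. A sketch of the assumed layout (big-endian counter, matching the binary.BigEndian use elsewhere in the package; hkdfBytes is hypothetical):

nonce := make([]byte, 12)                // e.g. a mode with a 12-byte IV
copy(nonce[:len(nonce)-8], hkdfBytes)    // only the prefix comes from HKDF
counter := nonce[len(nonce)-8:]          // same backing array as the nonce tail
binary.BigEndian.PutUint64(counter, 1)   // advancing the counter rewrites the nonce in place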

View File

@@ -259,7 +259,7 @@ FindLiteralData:
}
switch p := p.(type) {
case *packet.Compressed:
if err := packets.Push(p.Body); err != nil {
if err := packets.Push(p.LimitedBodyReader(config.DecompressedMessageSizeLimit())); err != nil {
return nil, err
}
case *packet.OnePassSignature:

View File

@@ -253,34 +253,12 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit
}
var hash crypto.Hash
for _, hashId := range candidateHashes {
if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() {
hash = h
break
}
}
// If the hash specified by config is a candidate, we'll use that.
if configuredHash := config.Hash(); configuredHash.Available() {
for _, hashId := range candidateHashes {
if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash {
hash = h
break
}
}
}
if hash == 0 {
hashId := candidateHashes[0]
name, ok := algorithm.HashIdToString(hashId)
if !ok {
name = "#" + strconv.Itoa(int(hashId))
}
return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
}
var salt []byte
if signer != nil {
if hash, err = selectHash(candidateHashes, config.Hash(), signer); err != nil {
return nil, err
}
var opsVersion = 3
if signer.Version == 6 {
opsVersion = signer.Version
@@ -558,13 +536,34 @@ func (s signatureWriter) Close() error {
return s.encryptedData.Close()
}
func selectHashForSigningKey(config *packet.Config, signer *packet.PublicKey) crypto.Hash {
acceptableHashes := acceptableHashesToWrite(signer)
hash, ok := algorithm.HashToHashId(config.Hash())
if !ok {
return config.Hash()
}
for _, acceptableHashId := range acceptableHashes {
if acceptableHashId == hash {
return config.Hash()
}
}
if len(acceptableHashes) > 0 {
defaultAcceptedHash, ok := algorithm.HashIdToHash(acceptableHashes[0])
if ok {
return defaultAcceptedHash
}
}
return config.Hash()
}
func createSignaturePacket(signer *packet.PublicKey, sigType packet.SignatureType, config *packet.Config) *packet.Signature {
sigLifetimeSecs := config.SigLifetime()
hash := selectHashForSigningKey(config, signer)
return &packet.Signature{
Version: signer.Version,
SigType: sigType,
PubKeyAlgo: signer.PubKeyAlgo,
Hash: config.Hash(),
Hash: hash,
CreationTime: config.Now(),
IssuerKeyId: &signer.KeyId,
IssuerFingerprint: signer.Fingerprint,
@@ -618,3 +617,74 @@ func handleCompression(compressed io.WriteCloser, candidateCompression []uint8,
}
return data, nil
}
// selectHash selects the preferred hash given the candidateHashes and the configuredHash
func selectHash(candidateHashes []byte, configuredHash crypto.Hash, signer *packet.PrivateKey) (hash crypto.Hash, err error) {
acceptableHashes := acceptableHashesToWrite(&signer.PublicKey)
candidateHashes = intersectPreferences(acceptableHashes, candidateHashes)
for _, hashId := range candidateHashes {
if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() {
hash = h
break
}
}
// If the hash specified by config is a candidate, we'll use that.
if configuredHash.Available() {
for _, hashId := range candidateHashes {
if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash {
hash = h
break
}
}
}
if hash == 0 {
if len(acceptableHashes) > 0 {
if h, ok := algorithm.HashIdToHash(acceptableHashes[0]); ok {
hash = h
} else {
return 0, errors.UnsupportedError("no candidate hash functions are compiled in.")
}
} else {
return 0, errors.UnsupportedError("no candidate hash functions are compiled in.")
}
}
return
}
func acceptableHashesToWrite(signingKey *packet.PublicKey) []uint8 {
switch signingKey.PubKeyAlgo {
case packet.PubKeyAlgoEd448:
return []uint8{
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA3_512),
}
case packet.PubKeyAlgoECDSA, packet.PubKeyAlgoEdDSA:
if curve, err := signingKey.Curve(); err == nil {
if curve == packet.Curve448 ||
curve == packet.CurveNistP521 ||
curve == packet.CurveBrainpoolP512 {
return []uint8{
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA3_512),
}
} else if curve == packet.CurveBrainpoolP384 ||
curve == packet.CurveNistP384 {
return []uint8{
hashToHashId(crypto.SHA384),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA3_512),
}
}
}
}
return []uint8{
hashToHashId(crypto.SHA256),
hashToHashId(crypto.SHA384),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA3_256),
hashToHashId(crypto.SHA3_512),
}
}

View File

@@ -17,9 +17,9 @@ ANTLR4 that it is compatible with (I.E. uses the /v4 path).
However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root
of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code.
This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not
list the release tag such as @4.12.0 - this was confusing, to say the least.
list the release tag such as @4.13.1 - this was confusing, to say the least.
As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr`
As of 4.13.0, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr`
(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information,
which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs].
@@ -49,7 +49,7 @@ Here is a general/recommended template for an ANTLR based recognizer in Go:
.
├── parser
│ ├── mygrammar.g4
│ ├── antlr-4.12.1-complete.jar
│ ├── antlr-4.13.1-complete.jar
│ ├── generate.go
│ └── generate.sh
├── parsing - generated code goes here
@@ -71,7 +71,7 @@ And the generate.sh file will look similar to this:
#!/bin/sh
alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
alias antlr4='java -Xmx500M -cp "./antlr4-4.13.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4
depending on whether you want visitors or listeners or any other ANTLR options. Note that another option here

View File

@@ -4,8 +4,6 @@
package antlr
import "sync"
// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
var ATNInvalidAltNumber int
@@ -56,9 +54,9 @@ type ATN struct {
//
states []ATNState
mu sync.Mutex
stateMu sync.RWMutex
edgeMu sync.RWMutex
mu Mutex
stateMu RWMutex
edgeMu RWMutex
}
// NewATN returns a new ATN struct representing the given grammarType and is used

View File

@@ -73,9 +73,6 @@ func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *AT
// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors'
// are just wrappers around this one.
func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
if semanticContext == nil {
panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed
}
b := &ATNConfig{}
b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext)
b.cType = parserConfig

View File

@@ -148,7 +148,7 @@ func (is *InputStream) GetTextFromInterval(i Interval) string {
}
func (*InputStream) GetSourceName() string {
return ""
return "Obtained from string"
}
// String returns the entire input stream as a string

View File

@@ -8,7 +8,6 @@ import (
"container/list"
"runtime/debug"
"sort"
"sync"
)
// Collectable is an interface that a struct should implement if it is to be
@@ -587,12 +586,12 @@ type VisitRecord struct {
type VisitList struct {
cache *list.List
lock sync.RWMutex
lock RWMutex
}
var visitListPool = VisitList{
cache: list.New(),
lock: sync.RWMutex{},
lock: RWMutex{},
}
// NewVisitRecord returns a new VisitRecord instance from the pool if available.

View File

@@ -207,7 +207,7 @@ func (b *BaseLexer) NextToken() Token {
for {
b.thetype = TokenInvalidType
ttype := b.safeMatch()
ttype := b.safeMatch() // Defaults to LexerSkip
if b.input.LA(1) == TokenEOF {
b.hitEOF = true

View File

@@ -40,6 +40,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
for alt := 0; alt < count; alt++ {
look[alt] = NewIntervalSet()
// TODO: This is one of the reasons that ATNConfigs are allocated and freed all the time - fix this tomorrow jim!
lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy")
la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false)

41
vendor/github.com/antlr4-go/antlr/v4/mutex.go generated vendored Normal file
View File

@@ -0,0 +1,41 @@
//go:build !antlr.nomutex
// +build !antlr.nomutex
package antlr
import "sync"
// Mutex is a simple mutex implementation which just delegates to sync.Mutex, it
// is used to provide a mutex implementation for the antlr package, which users
// can turn off with the build tag -tags antlr.nomutex
type Mutex struct {
mu sync.Mutex
}
func (m *Mutex) Lock() {
m.mu.Lock()
}
func (m *Mutex) Unlock() {
m.mu.Unlock()
}
type RWMutex struct {
mu sync.RWMutex
}
func (m *RWMutex) Lock() {
m.mu.Lock()
}
func (m *RWMutex) Unlock() {
m.mu.Unlock()
}
func (m *RWMutex) RLock() {
m.mu.RLock()
}
func (m *RWMutex) RUnlock() {
m.mu.RUnlock()
}
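Because this file is guarded by the !antlr.nomutex build tag (and mutex_nomutex.go below by its inverse), all locking can be compiled out at build time, e.g.:

go build -tags antlr.nomutex ./...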

32
vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go generated vendored Normal file
View File

@@ -0,0 +1,32 @@
//go:build antlr.nomutex
// +build antlr.nomutex
package antlr
type Mutex struct{}
func (m *Mutex) Lock() {
// No-op
}
func (m *Mutex) Unlock() {
// No-op
}
type RWMutex struct{}
func (m *RWMutex) Lock() {
// No-op
}
func (m *RWMutex) Unlock() {
// No-op
}
func (m *RWMutex) RLock() {
// No-op
}
func (m *RWMutex) RUnlock() {
// No-op
}

View File

@@ -10,8 +10,6 @@ import (
"strings"
)
var ()
// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over
// a standard JStore so that we can use Lazy instantiation of the JStore, mostly
// to avoid polluting the stats module with a ton of JStore instances with nothing in them.
@@ -883,7 +881,7 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre
// the ERROR state was reached, outerContext as the initial parser context from the paper
// or the parser stack at the instant before prediction commences.
//
// Teh func returns the value to return from [AdaptivePredict], or
// The func returns the value to return from [AdaptivePredict], or
// [ATNInvalidAltNumber] if a suitable alternative was not
// identified and [AdaptivePredict] should report an error instead.
func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int {

View File

@@ -6,7 +6,6 @@ package antlr
import (
"fmt"
"golang.org/x/exp/slices"
"strconv"
)
@@ -101,7 +100,7 @@ func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int)
hash = murmurUpdate(hash, returnState)
}
hash = murmurFinish(hash, len(parents)<<1)
nec := &PredictionContext{}
nec.cachedHash = hash
nec.pcType = PredictionContextArray
@@ -115,6 +114,9 @@ func (p *PredictionContext) Hash() int {
}
func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool {
if p == other {
return true
}
switch p.pcType {
case PredictionContextEmpty:
otherP := other.(*PredictionContext)
@@ -138,13 +140,11 @@ func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool
if p.cachedHash != other.Hash() {
return false // can't be same if hash is different
}
// Must compare the actual array elements and not just the array address
//
return slices.Equal(p.returnStates, other.returnStates) &&
slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool {
return x.Equals(y)
})
return intSlicesEqual(p.returnStates, other.returnStates) &&
pcSliceEqual(p.parents, other.parents)
}
func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool {
@@ -152,23 +152,23 @@ func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext
return false
}
otherP := other.(*PredictionContext)
if otherP == nil {
if otherP == nil || otherP.pcType != PredictionContextSingleton {
return false
}
if p.cachedHash != otherP.Hash() {
return false // Can't be same if hash is different
}
if p.returnState != otherP.getReturnState(0) {
return false
}
// Both parents must be nil if one is
if p.parentCtx == nil {
return otherP.parentCtx == nil
}
return p.parentCtx.Equals(otherP.parentCtx)
}
@@ -225,27 +225,27 @@ func (p *PredictionContext) String() string {
return "$"
case PredictionContextSingleton:
var up string
if p.parentCtx == nil {
up = ""
} else {
up = p.parentCtx.String()
}
if len(up) == 0 {
if p.returnState == BasePredictionContextEmptyReturnState {
return "$"
}
return strconv.Itoa(p.returnState)
}
return strconv.Itoa(p.returnState) + " " + up
case PredictionContextArray:
if p.isEmpty() {
return "[]"
}
s := "["
for i := 0; i < len(p.returnStates); i++ {
if i > 0 {
@@ -263,7 +263,7 @@ func (p *PredictionContext) String() string {
}
}
return s + "]"
default:
return "unknown"
}
@@ -309,18 +309,18 @@ func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *Predict
parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
state := a.states[outerContext.GetInvokingState()]
transition := state.GetTransitions()[0]
return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
}
func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
// Share same graph if both same
//
if a == b || a.Equals(b) {
return a
}
if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton {
return mergeSingletons(a, b, rootIsWildcard, mergeCache)
}
@@ -334,7 +334,7 @@ func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *Pr
return b
}
}
// Convert either Singleton or Empty to arrays, so that we can merge them
//
ara := convertToArray(a)
@@ -395,7 +395,7 @@ func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *J
return previous
}
}
rootMerge := mergeRoot(a, b, rootIsWildcard)
if rootMerge != nil {
if mergeCache != nil {
@@ -564,7 +564,7 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa
i := 0 // walks a
j := 0 // walks b
k := 0 // walks target M array
mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates))
// walk and merge to yield mergedParents, mergedReturnStates
@@ -626,9 +626,9 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa
mergedParents = mergedParents[0:k]
mergedReturnStates = mergedReturnStates[0:k]
}
M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
// if we created same array as a or b, return that instead
// TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation
if M.Equals(a) {
@@ -650,7 +650,7 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa
return b
}
combineCommonParents(&mergedParents)
if mergeCache != nil {
mergeCache.Put(a, b, M)
}
@@ -666,7 +666,7 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa
//goland:noinspection GoUnusedFunction
func combineCommonParents(parents *[]*PredictionContext) {
uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext")
for p := 0; p < len(*parents); p++ {
parent := (*parents)[p]
_, _ = uniqueParents.Put(parent)
@@ -685,7 +685,7 @@ func getCachedBasePredictionContext(context *PredictionContext, contextCache *Pr
if present {
return existing
}
existing, present = contextCache.Get(context)
if present {
visited.Put(context, existing)
@@ -722,6 +722,6 @@ func getCachedBasePredictionContext(context *PredictionContext, contextCache *Pr
contextCache.add(updated)
visited.Put(updated, updated)
visited.Put(context, updated)
return updated
}

View File

@@ -56,7 +56,7 @@ var tokenTypeMapCache = make(map[string]int)
var ruleIndexMapCache = make(map[string]int)
func (b *BaseRecognizer) checkVersion(toolVersion string) {
runtimeVersion := "4.12.0"
runtimeVersion := "4.13.1"
if runtimeVersion != toolVersion {
fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
}

View File

@@ -9,7 +9,6 @@ import (
"path/filepath"
"sort"
"strconv"
"sync"
)
// This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default
@@ -30,7 +29,7 @@ type goRunStats struct {
// within this package.
//
jStats []*JStatRec
jStatsLock sync.RWMutex
jStatsLock RWMutex
topN int
topNByMax []*JStatRec
topNByUsed []*JStatRec

View File

@@ -104,6 +104,25 @@ func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
return b.source
}
func (b *BaseToken) GetText() string {
if b.text != "" {
return b.text
}
input := b.GetInputStream()
if input == nil {
return ""
}
n := input.Size()
if b.GetStart() < n && b.GetStop() < n {
return input.GetTextFromInterval(NewInterval(b.GetStart(), b.GetStop()))
}
return "<EOF>"
}
func (b *BaseToken) SetText(text string) {
b.text = text
}
func (b *BaseToken) GetTokenIndex() int {
return b.tokenIndex
}
@@ -120,6 +139,28 @@ func (b *BaseToken) GetInputStream() CharStream {
return b.source.charStream
}
func (b *BaseToken) String() string {
txt := b.GetText()
if txt != "" {
txt = strings.Replace(txt, "\n", "\\n", -1)
txt = strings.Replace(txt, "\r", "\\r", -1)
txt = strings.Replace(txt, "\t", "\\t", -1)
} else {
txt = "<no text>"
}
var ch string
if b.GetChannel() > 0 {
ch = ",channel=" + strconv.Itoa(b.GetChannel())
} else {
ch = ""
}
return "[@" + strconv.Itoa(b.GetTokenIndex()) + "," + strconv.Itoa(b.GetStart()) + ":" + strconv.Itoa(b.GetStop()) + "='" +
txt + "',<" + strconv.Itoa(b.GetTokenType()) + ">" +
ch + "," + strconv.Itoa(b.GetLine()) + ":" + strconv.Itoa(b.GetColumn()) + "]"
}
type CommonToken struct {
BaseToken
}
@@ -170,44 +211,3 @@ func (c *CommonToken) clone() *CommonToken {
t.text = c.GetText()
return t
}
func (c *CommonToken) GetText() string {
if c.text != "" {
return c.text
}
input := c.GetInputStream()
if input == nil {
return ""
}
n := input.Size()
if c.start < n && c.stop < n {
return input.GetTextFromInterval(NewInterval(c.start, c.stop))
}
return "<EOF>"
}
func (c *CommonToken) SetText(text string) {
c.text = text
}
func (c *CommonToken) String() string {
txt := c.GetText()
if txt != "" {
txt = strings.Replace(txt, "\n", "\\n", -1)
txt = strings.Replace(txt, "\r", "\\r", -1)
txt = strings.Replace(txt, "\t", "\\t", -1)
} else {
txt = "<no text>"
}
var ch string
if c.channel > 0 {
ch = ",channel=" + strconv.Itoa(c.channel)
} else {
ch = ""
}
return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
}

View File

@@ -326,3 +326,56 @@ func isDirectory(dir string) (bool, error) {
}
return fileInfo.IsDir(), err
}
// intSlicesEqual returns true if the two slices of ints are equal, and is a little
// faster than slices.Equal.
func intSlicesEqual(s1, s2 []int) bool {
if s1 == nil && s2 == nil {
return true
}
if s1 == nil || s2 == nil {
return false
}
if len(s1) == 0 && len(s2) == 0 {
return true
}
if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) {
return false
}
// If the slices are using the same memory, then they are the same slice
if &s1[0] == &s2[0] {
return true
}
for i, v := range s1 {
if v != s2[i] {
return false
}
}
return true
}
func pcSliceEqual(s1, s2 []*PredictionContext) bool {
if s1 == nil && s2 == nil {
return true
}
if s1 == nil || s2 == nil {
return false
}
if len(s1) == 0 && len(s2) == 0 {
return true
}
if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) {
return false
}
// If the slices are using the same memory, then they are the same slice
if &s1[0] == &s2[0] {
return true
}
for i, v := range s1 {
if !v.Equals(s2[i]) {
return false
}
}
return true
}

View File

@@ -349,7 +349,7 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s
if cfg.hasSSOTokenProviderConfiguration() {
skippedFiles = 0
for _, f := range files {
section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName)))
section, ok := f.IniData.GetSection(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))
if ok {
var ssoSession ssoSession
ssoSession.setFromIniSection(section)

View File

@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.55.5"
const SDKVersion = "1.55.7"

View File

@@ -290,13 +290,15 @@ type downloader struct {
in *s3.GetObjectInput
w io.WriterAt
wg sync.WaitGroup
m sync.Mutex
wg sync.WaitGroup
m sync.Mutex
once sync.Once
pos int64
totalBytes int64
written int64
err error
etag string
partBodyMaxRetries int
}
@@ -424,6 +426,9 @@ func (d *downloader) downloadChunk(chunk dlchunk) error {
// Get the next byte range of data
in.Range = aws.String(chunk.ByteRange())
if in.VersionId == nil && d.etag != "" {
in.IfMatch = aws.String(d.etag)
}
var n int64
var err error
@@ -466,6 +471,9 @@ func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64
return 0, err
}
d.setTotalBytes(resp) // Set total if not yet set.
// Capture the ETag from the first response exactly once; downloadChunk
// sends it as If-Match on later ranged requests so a concurrently
// modified object fails the download instead of mixing versions.
d.once.Do(func() {
d.etag = aws.StringValue(resp.ETag)
})
var src io.Reader = resp.Body
if d.cfg.BufferProvider != nil {

View File

@@ -1,62 +0,0 @@
package backoff
import (
"context"
"time"
)
// BackOffContext is a backoff policy that stops retrying after the context
// is canceled.
type BackOffContext interface { // nolint: golint
BackOff
Context() context.Context
}
type backOffContext struct {
BackOff
ctx context.Context
}
// WithContext returns a BackOffContext with context ctx
//
// ctx must not be nil
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
if ctx == nil {
panic("nil context")
}
if b, ok := b.(*backOffContext); ok {
return &backOffContext{
BackOff: b.BackOff,
ctx: ctx,
}
}
return &backOffContext{
BackOff: b,
ctx: ctx,
}
}
func getContext(b BackOff) context.Context {
if cb, ok := b.(BackOffContext); ok {
return cb.Context()
}
if tb, ok := b.(*backOffTries); ok {
return getContext(tb.delegate)
}
return context.Background()
}
func (b *backOffContext) Context() context.Context {
return b.ctx
}
func (b *backOffContext) NextBackOff() time.Duration {
select {
case <-b.ctx.Done():
return Stop
default:
return b.BackOff.NextBackOff()
}
}

View File

@@ -1,216 +0,0 @@
package backoff
import (
"math/rand"
"time"
)
/*
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows exponentially.
NextBackOff() is calculated using the following formula:
randomized interval =
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.
For example, given the following parameters:
RetryInterval = 2
RandomizationFactor = 0.5
Multiplier = 2
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.
Note: MaxInterval caps the RetryInterval and not the randomized interval.
If the time elapsed since an ExponentialBackOff instance is created goes past the
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
The elapsed time can be reset by calling Reset().
Example: Given the following default arguments, for 10 tries the sequence will be,
and assuming we go over the MaxElapsedTime on the 10th try:
Request # RetryInterval (seconds) Randomized Interval (seconds)
1 0.5 [0.25, 0.75]
2 0.75 [0.375, 1.125]
3 1.125 [0.562, 1.687]
4 1.687 [0.8435, 2.53]
5 2.53 [1.265, 3.795]
6 3.795 [1.897, 5.692]
7 5.692 [2.846, 8.538]
8 8.538 [4.269, 12.807]
9 12.807 [6.403, 19.210]
10 19.210 backoff.Stop
Note: Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
InitialInterval time.Duration
RandomizationFactor float64
Multiplier float64
MaxInterval time.Duration
// After MaxElapsedTime the ExponentialBackOff returns Stop.
// It never stops if MaxElapsedTime == 0.
MaxElapsedTime time.Duration
Stop time.Duration
Clock Clock
currentInterval time.Duration
startTime time.Time
}
// Clock is an interface that returns current time for BackOff.
type Clock interface {
Now() time.Time
}
// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
type ExponentialBackOffOpts func(*ExponentialBackOff)
// Default values for ExponentialBackOff.
const (
DefaultInitialInterval = 500 * time.Millisecond
DefaultRandomizationFactor = 0.5
DefaultMultiplier = 1.5
DefaultMaxInterval = 60 * time.Second
DefaultMaxElapsedTime = 15 * time.Minute
)
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
b := &ExponentialBackOff{
InitialInterval: DefaultInitialInterval,
RandomizationFactor: DefaultRandomizationFactor,
Multiplier: DefaultMultiplier,
MaxInterval: DefaultMaxInterval,
MaxElapsedTime: DefaultMaxElapsedTime,
Stop: Stop,
Clock: SystemClock,
}
for _, fn := range opts {
fn(b)
}
b.Reset()
return b
}
// WithInitialInterval sets the initial interval between retries.
func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
return func(ebo *ExponentialBackOff) {
ebo.InitialInterval = duration
}
}
// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
return func(ebo *ExponentialBackOff) {
ebo.RandomizationFactor = randomizationFactor
}
}
// WithMultiplier sets the multiplier for increasing the interval after each retry.
func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
return func(ebo *ExponentialBackOff) {
ebo.Multiplier = multiplier
}
}
// WithMaxInterval sets the maximum interval between retries.
func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
return func(ebo *ExponentialBackOff) {
ebo.MaxInterval = duration
}
}
// WithMaxElapsedTime sets the maximum total time for retries.
func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
return func(ebo *ExponentialBackOff) {
ebo.MaxElapsedTime = duration
}
}
// WithRetryStopDuration sets the duration after which retries should stop.
func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
return func(ebo *ExponentialBackOff) {
ebo.Stop = duration
}
}
// WithClockProvider sets the clock used to measure time.
func WithClockProvider(clock Clock) ExponentialBackOffOpts {
return func(ebo *ExponentialBackOff) {
ebo.Clock = clock
}
}
type systemClock struct{}
func (t systemClock) Now() time.Time {
return time.Now()
}
// SystemClock implements Clock interface that uses time.Now().
var SystemClock = systemClock{}
// Reset the interval back to the initial retry interval and restarts the timer.
// Reset must be called before using b.
func (b *ExponentialBackOff) Reset() {
b.currentInterval = b.InitialInterval
b.startTime = b.Clock.Now()
}
// NextBackOff calculates the next backoff interval using the formula:
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
// Make sure we have not gone over the maximum elapsed time.
elapsed := b.GetElapsedTime()
next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
b.incrementCurrentInterval()
if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
return b.Stop
}
return next
}
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
// is created and is reset when Reset() is called.
//
// The elapsed time is computed using time.Now().UnixNano(). It is
// safe to call even while the backoff policy is used by a running
// ticker.
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
return b.Clock.Now().Sub(b.startTime)
}
// Increments the current interval by multiplying it with the multiplier.
func (b *ExponentialBackOff) incrementCurrentInterval() {
// Check for overflow, if overflow is detected set the current interval to the max interval.
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
b.currentInterval = b.MaxInterval
} else {
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
}
}
// Returns a random value from the following interval:
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
if randomizationFactor == 0 {
return currentInterval // make sure no randomness is used when randomizationFactor is 0.
}
var delta = randomizationFactor * float64(currentInterval)
var minInterval = float64(currentInterval) - delta
var maxInterval = float64(currentInterval) + delta
// Get a random value from the range [minInterval, maxInterval].
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
// we want a 33% chance for selecting either 1, 2 or 3.
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}

View File

@@ -1,146 +0,0 @@
package backoff
import (
"errors"
"time"
)
// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
// The operation will be retried using a backoff policy if it returns an error.
type OperationWithData[T any] func() (T, error)
// An Operation is executed by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error
func (o Operation) withEmptyData() OperationWithData[struct{}] {
return func() (struct{}, error) {
return struct{}{}, o()
}
}
// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
// NOTE that if the backoff policy stated to stop retrying,
// the notify function isn't called.
type Notify func(error, time.Duration)
// Retry the operation o until it does not return error or BackOff stops.
// o is guaranteed to be run at least once.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error {
return RetryNotify(o, b, nil)
}
// RetryWithData is like Retry but returns data in the response too.
func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
return RetryNotifyWithData(o, b, nil)
}
// RetryNotify calls notify function with the error and wait duration
// for each failed attempt before sleep.
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
return RetryNotifyWithTimer(operation, b, notify, nil)
}
// RetryNotifyWithData is like RetryNotify but returns data in the response too.
func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
return doRetryNotify(operation, b, notify, nil)
}
// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
// for each failed attempt before sleep.
// A default timer that uses system timer is used when nil is passed.
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
_, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
return err
}
// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
return doRetryNotify(operation, b, notify, t)
}
func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
var (
err error
next time.Duration
res T
)
if t == nil {
t = &defaultTimer{}
}
defer func() {
t.Stop()
}()
ctx := getContext(b)
b.Reset()
for {
res, err = operation()
if err == nil {
return res, nil
}
var permanent *PermanentError
if errors.As(err, &permanent) {
return res, permanent.Err
}
if next = b.NextBackOff(); next == Stop {
if cerr := ctx.Err(); cerr != nil {
return res, cerr
}
return res, err
}
if notify != nil {
notify(err, next)
}
t.Start(next)
select {
case <-ctx.Done():
return res, ctx.Err()
case <-t.C():
}
}
}
// PermanentError signals that the operation should not be retried.
type PermanentError struct {
Err error
}
func (e *PermanentError) Error() string {
return e.Err.Error()
}
func (e *PermanentError) Unwrap() error {
return e.Err
}
func (e *PermanentError) Is(target error) bool {
_, ok := target.(*PermanentError)
return ok
}
// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) error {
if err == nil {
return nil
}
return &PermanentError{
Err: err,
}
}

View File

@@ -1,38 +0,0 @@
package backoff
import "time"
/*
WithMaxRetries creates a wrapper around another BackOff, which will
return Stop if NextBackOff() has been called too many times since
the last time Reset() was called
Note: Implementation is not thread-safe.
*/
func WithMaxRetries(b BackOff, max uint64) BackOff {
return &backOffTries{delegate: b, maxTries: max}
}
type backOffTries struct {
delegate BackOff
maxTries uint64
numTries uint64
}
func (b *backOffTries) NextBackOff() time.Duration {
if b.maxTries == 0 {
return Stop
}
if b.maxTries > 0 {
if b.maxTries <= b.numTries {
return Stop
}
b.numTries++
}
return b.delegate.NextBackOff()
}
func (b *backOffTries) Reset() {
b.numTries = 0
b.delegate.Reset()
}

29
vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md generated vendored Normal file
View File

@@ -0,0 +1,29 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [5.0.0] - 2024-12-19
### Added
- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry.
### Changed
- Retry function now accepts additional options for specifying max number of tries and max elapsed time.
- Retry function now accepts a context.Context.
- Operation function signature changed to return result (any type) and error.
### Removed
- RetryNotify* and RetryWithData functions. Only single Retry function remains.
- Optional arguments from ExponentialBackoff constructor.
- Clock and Timer interfaces.
### Fixed
- The original error is returned from Retry if there's a PermanentError. (#144)
- The Retry function respects the wrapped PermanentError. (#140)

View File

@@ -1,4 +1,4 @@
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
# Exponential Backoff [![GoDoc][godoc image]][godoc]
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
@@ -9,9 +9,11 @@ The retries exponentially increase and stop increasing when a certain threshold
## Usage
Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end.
Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
For most cases, use the `Retry` function. See [example_test.go][example] for an example.
If you have specific needs, copy the `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed.
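A sketch against the v5 signature (defined in retry.go in this commit; the operation body is illustrative):

op := func() (string, error) {
return "", errors.New("transient") // placeholder failure
}
result, err := backoff.Retry(context.Background(), op, backoff.WithMaxTries(5))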
## Contributing
@@ -19,12 +21,11 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
* Please don't send a PR without opening an issue and discussing it first.
* If proposed change is not a common use case, I will probably not accept it.
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go
[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go

View File

@@ -15,16 +15,16 @@ import "time"
// BackOff is a backoff policy for retrying an operation.
type BackOff interface {
// NextBackOff returns the duration to wait before retrying the operation,
// or backoff. Stop to indicate that no more retries should be made.
// backoff.Stop to indicate that no more retries should be made.
//
// Example usage:
//
// duration := backoff.NextBackOff();
// if (duration == backoff.Stop) {
// // Do not retry operation.
// } else {
// // Sleep for duration and retry operation.
// }
// duration := backoff.NextBackOff()
// if duration == backoff.Stop {
// // Do not retry operation.
// } else {
// // Sleep for duration and retry operation.
// }
//
NextBackOff() time.Duration

46
vendor/github.com/cenkalti/backoff/v5/error.go generated vendored Normal file
View File

@@ -0,0 +1,46 @@
package backoff
import (
"fmt"
"time"
)
// PermanentError signals that the operation should not be retried.
type PermanentError struct {
Err error
}
// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) error {
if err == nil {
return nil
}
return &PermanentError{
Err: err,
}
}
// Error returns a string representation of the Permanent error.
func (e *PermanentError) Error() string {
return e.Err.Error()
}
// Unwrap returns the wrapped error.
func (e *PermanentError) Unwrap() error {
return e.Err
}
// RetryAfterError signals that the operation should be retried after the given duration.
type RetryAfterError struct {
Duration time.Duration
}
// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying.
func RetryAfter(seconds int) error {
return &RetryAfterError{Duration: time.Duration(seconds) * time.Second}
}
// Error returns a string representation of the RetryAfter error.
func (e *RetryAfterError) Error() string {
return fmt.Sprintf("retry after %s", e.Duration)
}
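An operation can steer the retrier directly: Retry (in retry.go below) unwraps this error with errors.As and uses the embedded Duration as the next delay, resetting the backoff. A sketch (the 429 scenario is illustrative):

op := func() ([]byte, error) {
// e.g. the server answered HTTP 429 with Retry-After: 3
return nil, backoff.RetryAfter(3)
}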

125
vendor/github.com/cenkalti/backoff/v5/exponential.go generated vendored Normal file
View File

@@ -0,0 +1,125 @@
package backoff
import (
"math/rand"
"time"
)
/*
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows exponentially.
NextBackOff() is calculated using the following formula:
randomized interval =
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.
For example, given the following parameters:
RetryInterval = 2
RandomizationFactor = 0.5
Multiplier = 2
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.
Note: MaxInterval caps the RetryInterval and not the randomized interval.
If the time elapsed since an ExponentialBackOff instance is created goes past the
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
The elapsed time can be reset by calling Reset().
Example: Given the following default arguments, for 10 tries the sequence will be,
and assuming we go over the MaxElapsedTime on the 10th try:
Request # RetryInterval (seconds) Randomized Interval (seconds)
1 0.5 [0.25, 0.75]
2 0.75 [0.375, 1.125]
3 1.125 [0.562, 1.687]
4 1.687 [0.8435, 2.53]
5 2.53 [1.265, 3.795]
6 3.795 [1.897, 5.692]
7 5.692 [2.846, 8.538]
8 8.538 [4.269, 12.807]
9 12.807 [6.403, 19.210]
10 19.210 backoff.Stop
Note: Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
InitialInterval time.Duration
RandomizationFactor float64
Multiplier float64
MaxInterval time.Duration
currentInterval time.Duration
}
// Default values for ExponentialBackOff.
const (
DefaultInitialInterval = 500 * time.Millisecond
DefaultRandomizationFactor = 0.5
DefaultMultiplier = 1.5
DefaultMaxInterval = 60 * time.Second
)
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff() *ExponentialBackOff {
return &ExponentialBackOff{
InitialInterval: DefaultInitialInterval,
RandomizationFactor: DefaultRandomizationFactor,
Multiplier: DefaultMultiplier,
MaxInterval: DefaultMaxInterval,
}
}
// Reset the interval back to the initial retry interval and restarts the timer.
// Reset must be called before using b.
func (b *ExponentialBackOff) Reset() {
b.currentInterval = b.InitialInterval
}
// NextBackOff calculates the next backoff interval using the formula:
//
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
if b.currentInterval == 0 {
b.currentInterval = b.InitialInterval
}
next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
b.incrementCurrentInterval()
return next
}
// Increments the current interval by multiplying it with the multiplier.
func (b *ExponentialBackOff) incrementCurrentInterval() {
// Check for overflow, if overflow is detected set the current interval to the max interval.
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
b.currentInterval = b.MaxInterval
} else {
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
}
}
// Returns a random value from the following interval:
//
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
if randomizationFactor == 0 {
return currentInterval // make sure no randomness is used when randomizationFactor is 0.
}
var delta = randomizationFactor * float64(currentInterval)
var minInterval = float64(currentInterval) - delta
var maxInterval = float64(currentInterval) + delta
// Get a random value from the range [minInterval, maxInterval].
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
// we want a 33% chance for selecting either 1, 2 or 3.
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}
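A worked value, called from inside the package and assuming the midpoint draw (random = 0.5):

d := getRandomValueFromInterval(0.5, 0.5, 2*time.Second)
// delta = 1s, so the range is [1s, 3s]; the midpoint draw yields d == 2s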

139
vendor/github.com/cenkalti/backoff/v5/retry.go generated vendored Normal file
View File

@@ -0,0 +1,139 @@
package backoff
import (
"context"
"errors"
"time"
)
// DefaultMaxElapsedTime sets a default limit for the total retry duration.
const DefaultMaxElapsedTime = 15 * time.Minute
// Operation is a function that attempts an operation and may be retried.
type Operation[T any] func() (T, error)
// Notify is a function called on operation error with the error and backoff duration.
type Notify func(error, time.Duration)
// retryOptions holds configuration settings for the retry mechanism.
type retryOptions struct {
BackOff BackOff // Strategy for calculating backoff periods.
Timer timer // Timer to manage retry delays.
Notify Notify // Optional function to notify on each retry error.
MaxTries uint // Maximum number of retry attempts.
MaxElapsedTime time.Duration // Maximum total time for all retries.
}
type RetryOption func(*retryOptions)
// WithBackOff configures a custom backoff strategy.
func WithBackOff(b BackOff) RetryOption {
return func(args *retryOptions) {
args.BackOff = b
}
}
// withTimer sets a custom timer for managing delays between retries.
func withTimer(t timer) RetryOption {
return func(args *retryOptions) {
args.Timer = t
}
}
// WithNotify sets a notification function to handle retry errors.
func WithNotify(n Notify) RetryOption {
return func(args *retryOptions) {
args.Notify = n
}
}
// WithMaxTries limits the number of retry attempts.
func WithMaxTries(n uint) RetryOption {
return func(args *retryOptions) {
args.MaxTries = n
}
}
// WithMaxElapsedTime limits the total duration for retry attempts.
func WithMaxElapsedTime(d time.Duration) RetryOption {
return func(args *retryOptions) {
args.MaxElapsedTime = d
}
}
// Retry attempts the operation until success, a permanent error, or backoff completion.
// It ensures the operation is executed at least once.
//
// Returns the operation result or error if retries are exhausted or context is cancelled.
func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) {
// Initialize default retry options.
args := &retryOptions{
BackOff: NewExponentialBackOff(),
Timer: &defaultTimer{},
MaxElapsedTime: DefaultMaxElapsedTime,
}
// Apply user-provided options to the default settings.
for _, opt := range opts {
opt(args)
}
defer args.Timer.Stop()
startedAt := time.Now()
args.BackOff.Reset()
for numTries := uint(1); ; numTries++ {
// Execute the operation.
res, err := operation()
if err == nil {
return res, nil
}
// Stop retrying if maximum tries exceeded.
if args.MaxTries > 0 && numTries >= args.MaxTries {
return res, err
}
// Handle permanent errors without retrying.
var permanent *PermanentError
if errors.As(err, &permanent) {
return res, err
}
// Stop retrying if context is cancelled.
if cerr := context.Cause(ctx); cerr != nil {
return res, cerr
}
// Calculate next backoff duration.
next := args.BackOff.NextBackOff()
if next == Stop {
return res, err
}
// Reset backoff if RetryAfterError is encountered.
var retryAfter *RetryAfterError
if errors.As(err, &retryAfter) {
next = retryAfter.Duration
args.BackOff.Reset()
}
// Stop retrying if maximum elapsed time exceeded.
if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime {
return res, err
}
// Notify on error if a notifier function is provided.
if args.Notify != nil {
args.Notify(err, next)
}
// Wait for the next backoff period or context cancellation.
args.Timer.Start(next)
select {
case <-args.Timer.C():
case <-ctx.Done():
return res, context.Cause(ctx)
}
}
}
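Options compose as extra variadic arguments; a sketch with illustrative values (ctx and op stand in for caller state):

b := backoff.NewExponentialBackOff()
b.MaxInterval = 10 * time.Second
res, err := backoff.Retry(ctx, op, backoff.WithBackOff(b), backoff.WithMaxElapsedTime(time.Minute))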

View File

@@ -1,7 +1,6 @@
package backoff
import (
"context"
"sync"
"time"
)
@@ -14,8 +13,7 @@ type Ticker struct {
C <-chan time.Time
c chan time.Time
b BackOff
ctx context.Context
timer Timer
timer timer
stop chan struct{}
stopOnce sync.Once
}
@@ -27,22 +25,12 @@ type Ticker struct {
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
return NewTickerWithTimer(b, &defaultTimer{})
}
// NewTickerWithTimer returns a new Ticker with a custom timer.
// A default timer that uses system timer is used when nil is passed.
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
if timer == nil {
timer = &defaultTimer{}
}
c := make(chan time.Time)
t := &Ticker{
C: c,
c: c,
b: b,
ctx: getContext(b),
timer: timer,
timer: &defaultTimer{},
stop: make(chan struct{}),
}
t.b.Reset()
@@ -73,8 +61,6 @@ func (t *Ticker) run() {
case <-t.stop:
t.c = nil // Prevent future ticks from being sent to the channel.
return
case <-t.ctx.Done():
return
}
}
}

View File

@@ -2,7 +2,7 @@ package backoff
import "time"
type Timer interface {
type timer interface {
Start(duration time.Duration)
Stop()
C() <-chan time.Time

View File

@@ -1,6 +1,5 @@
- *Sponsor BTC: 1Cbd6oGAUUyBi7X7MaR4np4nTmQZXVgkCW*
- *Sponsor ETH: 0x623A3C3a72186A6336C79b18Ac1eD36e1c71A8a6*
- *Go language paid QQ group: 1055927514*
- *Go language QQ groups: 102319854, 1055927514*
- *The Wa Programming Language (凹, pronounced "Wa"): https://github.com/wa-lang/wa*
----

View File

@@ -47,7 +47,7 @@ var formulaTable = map[string]func(n int) int{
return 0
},
fmtForms("nplurals=2; plural=(n != 1);"): func(n int) int {
if n <= 1 {
if n == 1 {
return 0
}
return 1

View File

@@ -29,6 +29,9 @@ func decodePoString(text string) string {
break
}
switch line[i+1] {
case 'r': // \\r -> \r
data = append(data, '\r')
i++
case 'n': // \\n -> \n
data = append(data, '\n')
i++
@@ -62,6 +65,8 @@ func encodePoString(text string) string {
buf.WriteString(`\\`)
case '"':
buf.WriteString(`\"`)
case '\r':
buf.WriteString(`\r`)
case '\n':
buf.WriteString(`\n`)
case '\t':
@@ -96,6 +101,8 @@ func encodeCommentPoString(text string) string {
buf.WriteString(`\\`)
case '"':
buf.WriteString(`\"`)
case '\r':
buf.WriteString(`\r`)
case '\n':
buf.WriteString(`\n`)
case '\t':

View File

@@ -1,4 +1,5 @@
// +build amd64
//go:build amd64 && !purego
// +build amd64,!purego
#include "textflag.h"

View File

@@ -1,4 +1,5 @@
// +build amd64
//go:build amd64 && !purego
// +build amd64,!purego
#include "textflag.h"

View File

@@ -18,6 +18,9 @@ func (Curve) Identity() *Point {
func (Curve) IsOnCurve(P *Point) bool {
x2, y2, t, t2, z2 := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}
rhs, lhs := &fp.Elt{}, &fp.Elt{}
// Check z != 0
eq0 := !fp.IsZero(&P.z)
fp.Mul(t, &P.ta, &P.tb) // t = ta*tb
fp.Sqr(x2, &P.x) // x^2
fp.Sqr(y2, &P.y) // y^2
@@ -27,13 +30,14 @@ func (Curve) IsOnCurve(P *Point) bool {
fp.Mul(rhs, t2, &paramD) // dt^2
fp.Add(rhs, rhs, z2) // z^2 + dt^2
fp.Sub(lhs, lhs, rhs) // x^2 + y^2 - (z^2 + dt^2)
eq0 := fp.IsZero(lhs)
eq1 := fp.IsZero(lhs)
fp.Mul(lhs, &P.x, &P.y) // xy
fp.Mul(rhs, t, &P.z) // tz
fp.Sub(lhs, lhs, rhs) // xy - tz
eq1 := fp.IsZero(lhs)
return eq0 && eq1
eq2 := fp.IsZero(lhs)
return eq0 && eq1 && eq2
}
// Generator returns the generator point.

View File

@@ -5,6 +5,8 @@ import (
"fmt"
"math/big"
"strings"
"golang.org/x/crypto/cryptobyte"
)
// BytesLe2Hex returns an hexadecimal string of a number stored in a
@@ -138,3 +140,34 @@ func BigInt2Uint64Le(z []uint64, x *big.Int) {
z[i] = 0
}
}
// MarshalBinary encodes a value into a byte array in a format readable by UnmarshalBinary.
func MarshalBinary(v cryptobyte.MarshalingValue) ([]byte, error) {
const DefaultSize = 32
b := cryptobyte.NewBuilder(make([]byte, 0, DefaultSize))
b.AddValue(v)
return b.Bytes()
}
// MarshalBinaryLen encodes a value into an array of n bytes in a format readable by UnmarshalBinary.
func MarshalBinaryLen(v cryptobyte.MarshalingValue, length uint) ([]byte, error) {
b := cryptobyte.NewFixedBuilder(make([]byte, 0, length))
b.AddValue(v)
return b.Bytes()
}
// An UnmarshalingValue decodes itself from a cryptobyte.String and advances the pointer.
// It reports whether the read was successful.
type UnmarshalingValue interface {
Unmarshal(*cryptobyte.String) bool
}
// UnmarshalBinary recovers a value from a byte array.
// It returns an error if the read was unsuccessful.
func UnmarshalBinary(v UnmarshalingValue, data []byte) (err error) {
s := cryptobyte.String(data)
if data == nil || !v.Unmarshal(&s) || !s.Empty() {
err = fmt.Errorf("cannot read %T from input string", v)
}
return
}

View File

@@ -1,4 +1,5 @@
// +build amd64
//go:build amd64 && !purego
// +build amd64,!purego
#include "textflag.h"
#include "fp_amd64.h"

View File

@@ -1,4 +1,5 @@
// +build amd64
//go:build amd64 && !purego
// +build amd64,!purego
#include "textflag.h"
#include "fp_amd64.h"

16
vendor/github.com/cloudflare/circl/math/integer.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
package math
import "math/bits"
// NextPow2 finds the next power of two (N=2^k, k>=0) greater than or equal to n.
// If n is already a power of two, then this function returns n, and log2(n).
func NextPow2(n uint) (N uint, k uint) {
if bits.OnesCount(n) == 1 {
k = uint(bits.TrailingZeros(n))
N = n
} else {
k = uint(bits.Len(n))
N = uint(1) << k
}
return
}
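For example:

N, k := NextPow2(5) // N == 8, k == 3
N, k = NextPow2(8)  // still N == 8, k == 3: n was already a power of two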

View File

@@ -164,7 +164,7 @@ func (P *pointR1) isEqual(Q *pointR1) bool {
fp.Mul(r, r, &P.z)
fp.Sub(l, l, r)
b = b && fp.IsZero(l)
return b
return b && !fp.IsZero(&P.z) && !fp.IsZero(&Q.z)
}
func (P *pointR3) neg() {

View File

@@ -206,7 +206,7 @@ func newKeyFromSeed(privateKey, seed []byte) {
func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) {
if len(ctx) > ContextMaxSize {
panic(fmt.Errorf("ed448: bad context length: " + strconv.Itoa(len(ctx))))
panic(fmt.Errorf("ed448: bad context length: %v", len(ctx)))
}
H := sha3.NewShake256()

View File

@@ -107,4 +107,7 @@ var (
// ErrContextNotSupported is the error used if a context is not
// supported.
ErrContextNotSupported = errors.New("context not supported")
// ErrContextTooLong is the error used if the context string is too long.
ErrContextTooLong = errors.New("context string too long")
)

View File

@@ -1,149 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"context"
"github.com/containerd/log"
)
// G is a shorthand for [GetLogger].
//
// Deprecated: use [log.G].
var G = log.G
// L is an alias for the standard logger.
//
// Deprecated: use [log.L].
var L = log.L
// Fields type to pass to "WithFields".
//
// Deprecated: use [log.Fields].
type Fields = log.Fields
// Entry is a logging entry.
//
// Deprecated: use [log.Entry].
type Entry = log.Entry
// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using
// zeros to ensure the formatted time is always the same number of
// characters.
//
// Deprecated: use [log.RFC3339NanoFixed].
const RFC3339NanoFixed = log.RFC3339NanoFixed
// Level is a logging level.
//
// Deprecated: use [log.Level].
type Level = log.Level
// Supported log levels.
const (
// TraceLevel level.
//
// Deprecated: use [log.TraceLevel].
TraceLevel Level = log.TraceLevel
// DebugLevel level.
//
// Deprecated: use [log.DebugLevel].
DebugLevel Level = log.DebugLevel
// InfoLevel level.
//
// Deprecated: use [log.InfoLevel].
InfoLevel Level = log.InfoLevel
// WarnLevel level.
//
// Deprecated: use [log.WarnLevel].
WarnLevel Level = log.WarnLevel
// ErrorLevel level
//
// Deprecated: use [log.ErrorLevel].
ErrorLevel Level = log.ErrorLevel
// FatalLevel level.
//
// Deprecated: use [log.FatalLevel].
FatalLevel Level = log.FatalLevel
// PanicLevel level.
//
// Deprecated: use [log.PanicLevel].
PanicLevel Level = log.PanicLevel
)
// SetLevel sets log level globally. It returns an error if the given
// level is not supported.
//
// Deprecated: use [log.SetLevel].
func SetLevel(level string) error {
return log.SetLevel(level)
}
// GetLevel returns the current log level.
//
// Deprecated: use [log.GetLevel].
func GetLevel() log.Level {
return log.GetLevel()
}
// OutputFormat specifies a log output format.
//
// Deprecated: use [log.OutputFormat].
type OutputFormat = log.OutputFormat
// Supported log output formats.
const (
// TextFormat represents the text logging format.
//
// Deprecated: use [log.TextFormat].
TextFormat log.OutputFormat = "text"
// JSONFormat represents the JSON logging format.
//
// Deprecated: use [log.JSONFormat].
JSONFormat log.OutputFormat = "json"
)
// SetFormat sets the log output format.
//
// Deprecated: use [log.SetFormat].
func SetFormat(format OutputFormat) error {
return log.SetFormat(format)
}
// WithLogger returns a new context with the provided logger. Use in
// combination with logger.WithField(s) for great effect.
//
// Deprecated: use [log.WithLogger].
func WithLogger(ctx context.Context, logger *log.Entry) context.Context {
return log.WithLogger(ctx, logger)
}
// GetLogger retrieves the current logger from the context. If no logger is
// available, the default logger is returned.
//
// Deprecated: use [log.GetLogger].
func GetLogger(ctx context.Context) *log.Entry {
return log.GetLogger(ctx)
}

191
vendor/github.com/containerd/errdefs/pkg/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright The containerd Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,96 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package errhttp provides utility functions for translating errors to
// and from an HTTP context.
//
// The functions ToHTTP and ToNative can be used to map server-side and
// client-side errors to the correct types.
package errhttp
import (
"errors"
"net/http"
"github.com/containerd/errdefs"
"github.com/containerd/errdefs/pkg/internal/cause"
)
// ToHTTP returns the best status code for the given error
func ToHTTP(err error) int {
switch {
case errdefs.IsNotFound(err):
return http.StatusNotFound
case errdefs.IsInvalidArgument(err):
return http.StatusBadRequest
case errdefs.IsConflict(err):
return http.StatusConflict
case errdefs.IsNotModified(err):
return http.StatusNotModified
case errdefs.IsFailedPrecondition(err):
return http.StatusPreconditionFailed
case errdefs.IsUnauthorized(err):
return http.StatusUnauthorized
case errdefs.IsPermissionDenied(err):
return http.StatusForbidden
case errdefs.IsResourceExhausted(err):
return http.StatusTooManyRequests
case errdefs.IsInternal(err):
return http.StatusInternalServerError
case errdefs.IsNotImplemented(err):
return http.StatusNotImplemented
case errdefs.IsUnavailable(err):
return http.StatusServiceUnavailable
case errdefs.IsUnknown(err):
var unexpected cause.ErrUnexpectedStatus
if errors.As(err, &unexpected) && unexpected.Status >= 200 && unexpected.Status < 600 {
return unexpected.Status
}
return http.StatusInternalServerError
default:
return http.StatusInternalServerError
}
}
// ToNative returns the error best matching the HTTP status code
func ToNative(statusCode int) error {
switch statusCode {
case http.StatusNotFound:
return errdefs.ErrNotFound
case http.StatusBadRequest:
return errdefs.ErrInvalidArgument
case http.StatusConflict:
return errdefs.ErrConflict
case http.StatusPreconditionFailed:
return errdefs.ErrFailedPrecondition
case http.StatusUnauthorized:
return errdefs.ErrUnauthenticated
case http.StatusForbidden:
return errdefs.ErrPermissionDenied
case http.StatusNotModified:
return errdefs.ErrNotModified
case http.StatusTooManyRequests:
return errdefs.ErrResourceExhausted
case http.StatusInternalServerError:
return errdefs.ErrInternal
case http.StatusNotImplemented:
return errdefs.ErrNotImplemented
case http.StatusServiceUnavailable:
return errdefs.ErrUnavailable
default:
return cause.ErrUnexpectedStatus{Status: statusCode}
}
}
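// Example (illustrative sketch, not part of the package): the two mappings are
// near-inverses for the errdefs sentinels; exampleRoundTrip is hypothetical.
func exampleRoundTrip() {
status := ToHTTP(errdefs.ErrNotFound) // http.StatusNotFound (404)
native := ToNative(status)            // errdefs.ErrNotFound
_ = native
}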

View File

@@ -0,0 +1,33 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cause is used to define root causes for errors
// common to errors packages like grpc and http.
package cause
import "fmt"
type ErrUnexpectedStatus struct {
Status int
}
const UnexpectedStatusPrefix = "unexpected status "
func (e ErrUnexpectedStatus) Error() string {
return fmt.Sprintf("%s%d", UnexpectedStatusPrefix, e.Status)
}
func (ErrUnexpectedStatus) Unknown() {}
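// The empty Unknown method is a marker: errdefs matches error categories by
// interface, so (assuming that interface-based matching) an unexpected status
// reports true from errdefs.IsUnknown:
//
// err := cause.ErrUnexpectedStatus{Status: 418}
// err.Error()            // "unexpected status 418"
// errdefs.IsUnknown(err) // true, via the Unknown() marker method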

View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,689 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
package estargz
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"os"
"path"
"runtime"
"strings"
"sync"
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
"github.com/klauspost/compress/zstd"
digest "github.com/opencontainers/go-digest"
"golang.org/x/sync/errgroup"
)
type options struct {
chunkSize int
compressionLevel int
prioritizedFiles []string
missedPrioritizedFiles *[]string
compression Compression
ctx context.Context
minChunkSize int
}
type Option func(o *options) error
// WithChunkSize option specifies the chunk size of eStargz blob to build.
func WithChunkSize(chunkSize int) Option {
return func(o *options) error {
o.chunkSize = chunkSize
return nil
}
}
// WithCompressionLevel option specifies the gzip compression level.
// The default is gzip.BestCompression.
// This option will be ignored if WithCompression option is used.
// See also: https://godoc.org/compress/gzip#pkg-constants
func WithCompressionLevel(level int) Option {
return func(o *options) error {
o.compressionLevel = level
return nil
}
}
// WithPrioritizedFiles option specifies the list of prioritized files.
// These files must be complete paths that are absolute or relative to "/"
// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
// are treated as "/foo/bar".
func WithPrioritizedFiles(files []string) Option {
return func(o *options) error {
o.prioritizedFiles = files
return nil
}
}
// WithAllowPrioritizeNotFound makes Build continue execution even if some
// of the prioritized files specified by the WithPrioritizedFiles option aren't
// found in the input tar. Instead, this records all missed file names in the
// passed slice.
func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
return func(o *options) error {
if missedFiles == nil {
return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
}
o.missedPrioritizedFiles = missedFiles
return nil
}
}
// WithCompression specifies the compression algorithm to be used.
// The default is gzip.
func WithCompression(compression Compression) Option {
return func(o *options) error {
o.compression = compression
return nil
}
}
// WithContext specifies a context that can be used for clean cancellation.
func WithContext(ctx context.Context) Option {
return func(o *options) error {
o.ctx = ctx
return nil
}
}
// WithMinChunkSize option specifies the minimal number of bytes of data that
// must be written in one gzip stream.
// By increasing this number, one gzip stream can contain multiple files,
// which hopefully leads to a smaller resulting blob.
// NOTE: This adds a TOC property that old readers don't understand.
func WithMinChunkSize(minChunkSize int) Option {
return func(o *options) error {
o.minChunkSize = minChunkSize
return nil
}
}
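// Example (illustrative sketch): a typical caller combines a few of these
// options in a single Build call. buildLayer is hypothetical; sr holds the
// input tar layer and ctx bounds the work.
func buildLayer(ctx context.Context, sr *io.SectionReader) (*Blob, error) {
return Build(sr,
WithChunkSize(4<<20), // 4 MiB chunks
WithPrioritizedFiles([]string{"etc/passwd"}), // grouped first for prefetch
WithContext(ctx), // allow clean cancellation
)
}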
// Blob is an eStargz blob.
type Blob struct {
io.ReadCloser
diffID digest.Digester
tocDigest digest.Digest
}
// DiffID returns the digest of uncompressed blob.
// It is only valid to call DiffID after Close.
func (b *Blob) DiffID() digest.Digest {
return b.diffID.Digest()
}
// TOCDigest returns the digest of uncompressed TOC JSON.
func (b *Blob) TOCDigest() digest.Digest {
return b.tocDigest
}
// Build builds an eStargz blob, which is an extended version of stargz, from a blob (gzip, zstd
// or plain tar) passed through the argument. If prioritized files are listed in
// the option, these files are grouped as "prioritized" and can be used for runtime optimization
// (e.g. prefetch). This function builds the blob in parallel, dividing it into several
// (at least the number of runtime.GOMAXPROCS(0)) sub-blobs.
func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
var opts options
opts.compressionLevel = gzip.BestCompression // BestCompression by default
for _, o := range opt {
if err := o(&opts); err != nil {
return nil, err
}
}
if opts.compression == nil {
opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
}
layerFiles := newTempFiles()
ctx := opts.ctx
if ctx == nil {
ctx = context.Background()
}
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-done:
// nop
case <-ctx.Done():
layerFiles.CleanupAll()
}
}()
defer func() {
if rErr != nil {
if err := layerFiles.CleanupAll(); err != nil {
rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
}
}
if cErr := ctx.Err(); cErr != nil {
rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
}
}()
tarBlob, err := decompressBlob(tarBlob, layerFiles)
if err != nil {
return nil, err
}
entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles)
if err != nil {
return nil, err
}
var tarParts [][]*entry
if opts.minChunkSize > 0 {
// Each entry needs to know the size of the current gzip stream so they
// cannot be processed in parallel.
tarParts = [][]*entry{entries}
} else {
tarParts = divideEntries(entries, runtime.GOMAXPROCS(0))
}
writers := make([]*Writer, len(tarParts))
payloads := make([]*os.File, len(tarParts))
var mu sync.Mutex
var eg errgroup.Group
for i, parts := range tarParts {
i, parts := i, parts
// builds verifiable stargz sub-blobs
eg.Go(func() error {
esgzFile, err := layerFiles.TempFile("", "esgzdata")
if err != nil {
return err
}
sw := NewWriterWithCompressor(esgzFile, opts.compression)
sw.ChunkSize = opts.chunkSize
sw.MinChunkSize = opts.minChunkSize
if sw.needsOpenGzEntries == nil {
sw.needsOpenGzEntries = make(map[string]struct{})
}
for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} {
sw.needsOpenGzEntries[f] = struct{}{}
}
if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
return err
}
mu.Lock()
writers[i] = sw
payloads[i] = esgzFile
mu.Unlock()
return nil
})
}
if err := eg.Wait(); err != nil {
rErr = err
return nil, err
}
tocAndFooter, tocDgst, err := closeWithCombine(writers...)
if err != nil {
rErr = err
return nil, err
}
var rs []io.Reader
for _, p := range payloads {
fs, err := fileSectionReader(p)
if err != nil {
return nil, err
}
rs = append(rs, fs)
}
diffID := digest.Canonical.Digester()
pr, pw := io.Pipe()
go func() {
r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
if err != nil {
pw.CloseWithError(err)
return
}
defer r.Close()
if _, err := io.Copy(diffID.Hash(), r); err != nil {
pw.CloseWithError(err)
return
}
pw.Close()
}()
return &Blob{
ReadCloser: readCloser{
Reader: pr,
closeFunc: layerFiles.CleanupAll,
},
tocDigest: tocDgst,
diffID: diffID,
}, nil
}
// closeWithCombine takes unclosed Writers and closes them. It also returns the
// TOC that combines all the Writers.
// The Writers don't write the TOC and footer to the underlying writers, so they can be
// combined into a single eStargz, and the tocAndFooter returned by this function can
// be appended at the tail of that combined blob.
func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
if len(ws) == 0 {
return nil, "", fmt.Errorf("at least one writer must be passed")
}
for _, w := range ws {
if w.closed {
return nil, "", fmt.Errorf("writer must be unclosed")
}
defer func(w *Writer) { w.closed = true }(w)
if err := w.closeGz(); err != nil {
return nil, "", err
}
if err := w.bw.Flush(); err != nil {
return nil, "", err
}
}
var (
mtoc = new(JTOC)
currentOffset int64
)
mtoc.Version = ws[0].toc.Version
for _, w := range ws {
for _, e := range w.toc.Entries {
// Recalculate Offset of non-empty files/chunks
if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
e.Offset += currentOffset
}
mtoc.Entries = append(mtoc.Entries, e)
}
if w.toc.Version > mtoc.Version {
mtoc.Version = w.toc.Version
}
currentOffset += w.cw.n
}
return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
}
func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
buf := new(bytes.Buffer)
tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
if err != nil {
return nil, "", err
}
return buf, tocDigest, nil
}
// divideEntries divides the passed entries into at least the number of parts
// specified by the argument.
func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
var estimatedSize int64
for _, e := range entries {
estimatedSize += e.header.Size
}
unitSize := estimatedSize / int64(minPartsNum)
var (
nextEnd = unitSize
offset int64
)
set = append(set, []*entry{})
for _, e := range entries {
set[len(set)-1] = append(set[len(set)-1], e)
offset += e.header.Size
if offset > nextEnd {
set = append(set, []*entry{})
nextEnd += unitSize
}
}
return
}
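// Example (illustrative): with four 5-byte entries and minPartsNum = 2,
// estimatedSize = 20 and unitSize = 10. The cumulative offsets are 5, 10, 15
// and 20; a new part opens only once the offset exceeds nextEnd (15 > 10), so
// the result is [[e1, e2, e3], [e4]].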
var errNotFound = errors.New("not found")
// sortEntries reads the specified tar blob and returns a list of tar entries.
// If some prioritized files are specified, the list starts with these
// files, keeping the order specified by the argument.
func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
// Import tar file.
intar, err := importTar(in)
if err != nil {
return nil, fmt.Errorf("failed to sort: %w", err)
}
// Sort the tar file respecting the prioritized files list.
sorted := &tarFile{}
for _, l := range prioritized {
if err := moveRec(l, intar, sorted); err != nil {
if errors.Is(err, errNotFound) && missedPrioritized != nil {
*missedPrioritized = append(*missedPrioritized, l)
continue // allow not found
}
return nil, fmt.Errorf("failed to sort tar entries: %w", err)
}
}
if len(prioritized) == 0 {
sorted.add(&entry{
header: &tar.Header{
Name: NoPrefetchLandmark,
Typeflag: tar.TypeReg,
Size: int64(len([]byte{landmarkContents})),
},
payload: bytes.NewReader([]byte{landmarkContents}),
})
} else {
sorted.add(&entry{
header: &tar.Header{
Name: PrefetchLandmark,
Typeflag: tar.TypeReg,
Size: int64(len([]byte{landmarkContents})),
},
payload: bytes.NewReader([]byte{landmarkContents}),
})
}
// Dump all entries and concatenate them.
return append(sorted.dump(), intar.dump()...), nil
}
// readerFromEntries returns a reader of tar archive that contains entries passed
// through the arguments.
func readerFromEntries(entries ...*entry) io.Reader {
pr, pw := io.Pipe()
go func() {
tw := tar.NewWriter(pw)
defer tw.Close()
for _, entry := range entries {
if err := tw.WriteHeader(entry.header); err != nil {
pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
return
}
if _, err := io.Copy(tw, entry.payload); err != nil {
pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
return
}
}
pw.Close()
}()
return pr
}
func importTar(in io.ReaderAt) (*tarFile, error) {
tf := &tarFile{}
pw, err := newCountReadSeeker(in)
if err != nil {
return nil, fmt.Errorf("failed to make position watcher: %w", err)
}
tr := tar.NewReader(pw)
// Walk through all nodes.
for {
// Fetch and parse next header.
h, err := tr.Next()
if err != nil {
if err == io.EOF {
break
}
return nil, fmt.Errorf("failed to parse tar file, %w", err)
}
switch cleanEntryName(h.Name) {
case PrefetchLandmark, NoPrefetchLandmark:
// Ignore existing landmark
continue
}
// Add entry. If it already exists, replace it.
if _, ok := tf.get(h.Name); ok {
tf.remove(h.Name)
}
tf.add(&entry{
header: h,
payload: io.NewSectionReader(in, pw.currentPos(), h.Size),
})
}
return tf, nil
}
func moveRec(name string, in *tarFile, out *tarFile) error {
name = cleanEntryName(name)
if name == "" { // root directory. stop recursion.
if e, ok := in.get(name); ok {
// entry of the root directory exists. we should move it as well.
// this case will occur if tar entries are prefixed with "./", "/", etc.
out.add(e)
in.remove(name)
}
return nil
}
_, okIn := in.get(name)
_, okOut := out.get(name)
if !okIn && !okOut {
return fmt.Errorf("file: %q: %w", name, errNotFound)
}
parent, _ := path.Split(strings.TrimSuffix(name, "/"))
if err := moveRec(parent, in, out); err != nil {
return err
}
if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
if err := moveRec(e.header.Linkname, in, out); err != nil {
return err
}
}
if e, ok := in.get(name); ok {
out.add(e)
in.remove(name)
}
return nil
}
type entry struct {
header *tar.Header
payload io.ReadSeeker
}
type tarFile struct {
index map[string]*entry
stream []*entry
}
func (f *tarFile) add(e *entry) {
if f.index == nil {
f.index = make(map[string]*entry)
}
f.index[cleanEntryName(e.header.Name)] = e
f.stream = append(f.stream, e)
}
func (f *tarFile) remove(name string) {
name = cleanEntryName(name)
if f.index != nil {
delete(f.index, name)
}
var filtered []*entry
for _, e := range f.stream {
if cleanEntryName(e.header.Name) == name {
continue
}
filtered = append(filtered, e)
}
f.stream = filtered
}
func (f *tarFile) get(name string) (e *entry, ok bool) {
if f.index == nil {
return nil, false
}
e, ok = f.index[cleanEntryName(name)]
return
}
func (f *tarFile) dump() []*entry {
return f.stream
}
type readCloser struct {
io.Reader
closeFunc func() error
}
func (rc readCloser) Close() error {
return rc.closeFunc()
}
func fileSectionReader(file *os.File) (*io.SectionReader, error) {
info, err := file.Stat()
if err != nil {
return nil, err
}
return io.NewSectionReader(file, 0, info.Size()), nil
}
func newTempFiles() *tempFiles {
return &tempFiles{}
}
type tempFiles struct {
files []*os.File
filesMu sync.Mutex
cleanupOnce sync.Once
}
func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
f, err := os.CreateTemp(dir, pattern)
if err != nil {
return nil, err
}
tf.filesMu.Lock()
tf.files = append(tf.files, f)
tf.filesMu.Unlock()
return f, nil
}
func (tf *tempFiles) CleanupAll() (err error) {
tf.cleanupOnce.Do(func() {
err = tf.cleanupAll()
})
return
}
func (tf *tempFiles) cleanupAll() error {
tf.filesMu.Lock()
defer tf.filesMu.Unlock()
var allErr []error
for _, f := range tf.files {
if err := f.Close(); err != nil {
allErr = append(allErr, err)
}
if err := os.Remove(f.Name()); err != nil {
allErr = append(allErr, err)
}
}
tf.files = nil
return errorutil.Aggregate(allErr)
}
func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) {
pos := int64(0)
return &countReadSeeker{r: r, cPos: &pos}, nil
}
type countReadSeeker struct {
r io.ReaderAt
cPos *int64
mu sync.Mutex
}
func (cr *countReadSeeker) Read(p []byte) (int, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
n, err := cr.r.ReadAt(p, *cr.cPos)
if err == nil {
*cr.cPos += int64(n)
}
return n, err
}
func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
switch whence {
default:
return 0, fmt.Errorf("Unknown whence: %v", whence)
case io.SeekStart:
case io.SeekCurrent:
offset += *cr.cPos
case io.SeekEnd:
return 0, fmt.Errorf("Unsupported whence: %v", whence)
}
if offset < 0 {
return 0, fmt.Errorf("invalid offset")
}
*cr.cPos = offset
return offset, nil
}
func (cr *countReadSeeker) currentPos() int64 {
cr.mu.Lock()
defer cr.mu.Unlock()
return *cr.cPos
}
func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) {
if org.Size() < 4 {
return org, nil
}
src := make([]byte, 4)
if _, err := org.Read(src); err != nil && err != io.EOF {
return nil, err
}
var dR io.Reader
if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) {
// gzip
dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
if err != nil {
return nil, err
}
defer dgR.Close()
dR = io.Reader(dgR)
} else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) {
// zstd
dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size()))
if err != nil {
return nil, err
}
defer dzR.Close()
dR = io.Reader(dzR)
} else {
// uncompressed
return io.NewSectionReader(org, 0, org.Size()), nil
}
b, err := tmp.TempFile("", "uncompresseddata")
if err != nil {
return nil, err
}
if _, err := io.Copy(b, dR); err != nil {
return nil, err
}
return fileSectionReader(b)
}

View File

@@ -0,0 +1,40 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errorutil
import (
"errors"
"fmt"
"strings"
)
// Aggregate combines a list of errors into a single new error.
func Aggregate(errs []error) error {
switch len(errs) {
case 0:
return nil
case 1:
return errs[0]
default:
points := make([]string, len(errs)+1)
points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs))
for i, err := range errs {
points[i+1] = fmt.Sprintf("* %s", err)
}
return errors.New(strings.Join(points, "\n\t"))
}
}
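// Example (illustrative):
//
// err := Aggregate([]error{errors.New("close failed"), errors.New("remove failed")})
// err.Error() renders as (bullets tab-indented):
//
// 2 error(s) occurred:
// * close failed
// * remove failed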

File diff suppressed because it is too large

View File

@@ -0,0 +1,237 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
package estargz
import (
"archive/tar"
"bytes"
"compress/gzip"
"encoding/binary"
"encoding/json"
"fmt"
"hash"
"io"
"strconv"
digest "github.com/opencontainers/go-digest"
)
type gzipCompression struct {
*GzipCompressor
*GzipDecompressor
}
func newGzipCompressionWithLevel(level int) Compression {
return &gzipCompression{
&GzipCompressor{level},
&GzipDecompressor{},
}
}
func NewGzipCompressor() *GzipCompressor {
return &GzipCompressor{gzip.BestCompression}
}
func NewGzipCompressorWithLevel(level int) *GzipCompressor {
return &GzipCompressor{level}
}
type GzipCompressor struct {
compressionLevel int
}
func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) {
return gzip.NewWriterLevel(w, gc.compressionLevel)
}
func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) {
tocJSON, err := json.MarshalIndent(toc, "", "\t")
if err != nil {
return "", err
}
gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel)
gw := io.Writer(gz)
if diffHash != nil {
gw = io.MultiWriter(gz, diffHash)
}
tw := tar.NewWriter(gw)
if err := tw.WriteHeader(&tar.Header{
Typeflag: tar.TypeReg,
Name: TOCTarName,
Size: int64(len(tocJSON)),
}); err != nil {
return "", err
}
if _, err := tw.Write(tocJSON); err != nil {
return "", err
}
if err := tw.Close(); err != nil {
return "", err
}
if err := gz.Close(); err != nil {
return "", err
}
if _, err := w.Write(gzipFooterBytes(off)); err != nil {
return "", err
}
return digest.FromBytes(tocJSON), nil
}
// gzipFooterBytes returns the 51-byte footer.
func gzipFooterBytes(tocOff int64) []byte {
buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
// Extra header indicating the offset of TOCJSON
// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
header := make([]byte, 4)
header[0], header[1] = 'S', 'G'
subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
gz.Header.Extra = append(header, []byte(subfield)...)
gz.Close()
if buf.Len() != FooterSize {
panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
}
return buf.Bytes()
}
type GzipDecompressor struct{}
func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
return gzip.NewReader(r)
}
func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
return parseTOCEStargz(r)
}
func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
if len(p) != FooterSize {
return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
}
zr, err := gzip.NewReader(bytes.NewReader(p))
if err != nil {
return 0, 0, 0, err
}
defer zr.Close()
extra := zr.Header.Extra
si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
if si1 != 'S' || si2 != 'G' {
return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
}
if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
}
if string(subfield[16:]) != "STARGZ" {
return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
}
tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
}
return tocOffset, tocOffset, 0, nil
}
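// Example (illustrative): the footer writer and parser above are symmetric.
//
// footer := gzipFooterBytes(0x1234) // 51 bytes; offset hex-encoded in the Extra field
// (&GzipDecompressor{}).ParseFooter(footer) // returns (0x1234, 0x1234, 0, nil);
// the TOC size is not stored in the footer, so tocSize is reported as 0.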
func (gz *GzipDecompressor) FooterSize() int64 {
return FooterSize
}
func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
return decompressTOCEStargz(r)
}
type LegacyGzipDecompressor struct{}
func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
return gzip.NewReader(r)
}
func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
return parseTOCEStargz(r)
}
func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
if len(p) != legacyFooterSize {
return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
}
zr, err := gzip.NewReader(bytes.NewReader(p))
if err != nil {
return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
}
defer zr.Close()
extra := zr.Header.Extra
if len(extra) != 16+len("STARGZ") {
return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
}
if string(extra[16:]) != "STARGZ" {
return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
}
tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
}
return tocOffset, tocOffset, 0, nil
}
func (gz *LegacyGzipDecompressor) FooterSize() int64 {
return legacyFooterSize
}
func (gz *LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
return decompressTOCEStargz(r)
}
func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
tr, err := decompressTOCEStargz(r)
if err != nil {
return nil, "", err
}
dgstr := digest.Canonical.Digester()
toc = new(JTOC)
if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
}
if err := tr.Close(); err != nil {
return nil, "", err
}
return toc, dgstr.Digest(), nil
}
func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
zr, err := gzip.NewReader(r)
if err != nil {
return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
}
zr.Multistream(false)
tr := tar.NewReader(zr)
h, err := tr.Next()
if err != nil {
return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
}
if h.Name != TOCTarName {
return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
}
return readCloser{tr, zr.Close}, nil
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,342 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
package estargz
import (
"archive/tar"
"hash"
"io"
"os"
"path"
"time"
digest "github.com/opencontainers/go-digest"
)
const (
// TOCTarName is the name of the JSON file in the tar archive in the
// table of contents gzip stream.
TOCTarName = "stargz.index.json"
// FooterSize is the number of bytes in the footer
//
// The footer is an empty gzip stream with no compression and an Extra
// header of the form "%016xSTARGZ", where the 64 bit hex-encoded
// number is the offset to the gzip stream of JSON TOC.
//
// 51 comes from:
//
// 10 bytes gzip header
// 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
// 2 bytes Extra: SI1 = 'S', SI2 = 'G'
// 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
// 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
// 5 bytes flate header
// 8 bytes gzip footer
// (End of the eStargz blob)
//
// NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz.
FooterSize = 51
// legacyFooterSize is the number of bytes in the legacy stargz footer.
//
// 47 comes from:
//
// 10 byte gzip header +
// 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
// 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
// 5 byte flate header
// 8 byte gzip footer (two little endian uint32s: digest, size)
legacyFooterSize = 47
// TOCJSONDigestAnnotation is an annotation for an image layer. This stores the
// digest of the TOC JSON.
// This annotation is valid only when it is specified in `.[]layers.annotations`
// of an image manifest.
TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
// StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy
// pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size
// to the runtime, but the current OCI image spec doesn't ship this information by default. So we
// store this in the special annotation.
StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size"
// PrefetchLandmark is a file entry which indicates the end position of
// prefetch in the stargz file.
PrefetchLandmark = ".prefetch.landmark"
// NoPrefetchLandmark is a file entry which indicates that no prefetch should
// occur in the stargz file.
NoPrefetchLandmark = ".no.prefetch.landmark"
landmarkContents = 0xf
)
// JTOC is the JSON-serialized table of contents index of the files in the stargz file.
type JTOC struct {
Version int `json:"version"`
Entries []*TOCEntry `json:"entries"`
}
// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
type TOCEntry struct {
// Name is the tar entry's name. It is the complete path
// stored in the tar file, not just the base name.
Name string `json:"name"`
// Type is one of "dir", "reg", "symlink", "hardlink", "char",
// "block", "fifo", or "chunk".
// The "chunk" type is used for regular file data chunks past the first
// TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
// ChunkOffset, and ChunkSize populated.
Type string `json:"type"`
// Size, for regular files, is the logical size of the file.
Size int64 `json:"size,omitempty"`
// ModTime3339 is the modification time of the tar entry. Empty
// means zero or unknown. Otherwise it's in UTC RFC3339
// format. Use the ModTime method to access the time.Time value.
ModTime3339 string `json:"modtime,omitempty"`
modTime time.Time
// LinkName, for symlinks and hardlinks, is the link target.
LinkName string `json:"linkName,omitempty"`
// Mode is the permission and mode bits.
Mode int64 `json:"mode,omitempty"`
// UID is the user ID of the owner.
UID int `json:"uid,omitempty"`
// GID is the group ID of the owner.
GID int `json:"gid,omitempty"`
// Uname is the username of the owner.
//
// In the serialized JSON, this field may only be present for
// the first entry with the same UID.
Uname string `json:"userName,omitempty"`
// Gname is the group name of the owner.
//
// In the serialized JSON, this field may only be present for
// the first entry with the same GID.
Gname string `json:"groupName,omitempty"`
// Offset, for regular files, provides the offset in the
// stargz file to the file's data bytes. See ChunkOffset and
// ChunkSize.
Offset int64 `json:"offset,omitempty"`
// InnerOffset is an optional field that indicates the uncompressed offset
// of this "reg" or "chunk" payload in a stream that starts from Offset.
// This field makes it possible to put multiple "reg" or "chunk" payloads
// in one chunk while having the same Offset but different InnerOffset.
InnerOffset int64 `json:"innerOffset,omitempty"`
nextOffset int64 // the Offset of the next entry with a non-zero Offset
// DevMajor is the major device number for "char" and "block" types.
DevMajor int `json:"devMajor,omitempty"`
// DevMinor is the minor device number for "char" and "block" types.
DevMinor int `json:"devMinor,omitempty"`
// NumLink is the number of entry names pointing to this entry.
// Zero means one name references this entry.
// This field is calculated during runtime and not recorded in TOC JSON.
NumLink int `json:"-"`
// Xattrs are the extended attribute for the entry.
Xattrs map[string][]byte `json:"xattrs,omitempty"`
// Digest stores the OCI checksum for regular files payload.
// It has the form "sha256:abcdef01234....".
Digest string `json:"digest,omitempty"`
// ChunkOffset is non-zero if this is a chunk of a large,
// regular file. If so, the Offset is where the gzip header of
// ChunkSize bytes at ChunkOffset in Name begin.
//
// In serialized form, a "chunkSize" JSON field of zero means
// that the chunk goes to the end of the file. After reading
// from the stargz TOC, though, the ChunkSize is initialized
// to a non-zero value for when Type is either "reg" or
// "chunk".
ChunkOffset int64 `json:"chunkOffset,omitempty"`
ChunkSize int64 `json:"chunkSize,omitempty"`
// ChunkDigest stores an OCI digest of the chunk. This must be formed
// as "sha256:0123abcd...".
ChunkDigest string `json:"chunkDigest,omitempty"`
children map[string]*TOCEntry
// chunkTopIndex is index of the entry where Offset starts in the blob.
chunkTopIndex int
}
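// As an illustration of how the JSON tags above shape the serialized TOC,
// here is a minimal, hedged sketch of decoding a TOC document into a JTOC
// (the sample payload is hypothetical, and "encoding/json" plus "fmt" are
// assumed to be imported):
//
//	var toc JTOC
//	raw := []byte(`{"version":1,"entries":[{"name":"foo.txt","type":"reg","size":3,"offset":512}]}`)
//	if err := json.Unmarshal(raw, &toc); err != nil {
//		// handle malformed TOC JSON
//	}
//	for _, ent := range toc.Entries {
//		fmt.Printf("%s (%s) at offset %d\n", ent.Name, ent.Type, ent.Offset)
//	}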
// ModTime returns the entry's modification time.
func (e *TOCEntry) ModTime() time.Time { return e.modTime }
// NextOffset returns the position (relative to the start of the
// stargz file) of the next gzip boundary after e.Offset.
func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
if e.children == nil {
e.children = make(map[string]*TOCEntry)
}
if child.Type == "dir" {
e.NumLink++ // Entry ".." in the subdirectory links to this directory
}
e.children[baseName] = child
}
// isDataType reports whether TOCEntry is a regular file or chunk (something that
// contains regular file data).
func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
// Stat returns a FileInfo value representing e.
func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
// ForeachChild calls f for each child item. If f returns false, iteration ends.
// If e is not a directory, f is not called.
func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
for name, ent := range e.children {
if !f(name, ent) {
return
}
}
}
// LookupChild returns the directory e's child by its base name.
func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
child, ok = e.children[baseName]
return
}
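// As a usage sketch of the two lookup helpers above (root is assumed to be
// the TOC entry for the image's root directory; "fmt" import assumed):
//
//	if usr, ok := root.LookupChild("usr"); ok {
//		usr.ForeachChild(func(baseName string, ent *TOCEntry) bool {
//			fmt.Println(baseName, ent.Type)
//			return true // returning false would stop the iteration
//		})
//	}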
// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
type fileInfo struct{ e *TOCEntry }
var _ os.FileInfo = fileInfo{}
func (fi fileInfo) Name() string { return path.Base(fi.e.Name) }
func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" }
func (fi fileInfo) Size() int64 { return fi.e.Size }
func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
func (fi fileInfo) Sys() interface{} { return fi.e }
func (fi fileInfo) Mode() (m os.FileMode) {
// TOCEntry.Mode is tar.Header.Mode so we can understand these bits using the `tar` pkg.
m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
(os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
switch fi.e.Type {
case "dir":
m |= os.ModeDir
case "symlink":
m |= os.ModeSymlink
case "char":
m |= os.ModeDevice | os.ModeCharDevice
case "block":
m |= os.ModeDevice
case "fifo":
m |= os.ModeNamedPipe
}
return m
}
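// A short sketch of how Stat and Mode combine so TOC entries can be used with
// standard library idioms (ent is assumed to be a *TOCEntry; "fmt" import
// assumed):
//
//	fi := ent.Stat()
//	if fi.IsDir() {
//		fmt.Printf("%s is a directory (mode %v)\n", fi.Name(), fi.Mode())
//	}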
// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
// in an eStargz blob.
type TOCEntryVerifier interface {
// Verifier provides a content verifier that can be used for verifying the
// contents of the specified TOCEntry.
Verifier(ce *TOCEntry) (digest.Verifier, error)
}
// Compression provides the compression helper to be used when creating and parsing eStargz.
// This package provides gzip-based Compression by default, but any compression
// algorithm (e.g. zstd) can be used as long as it implements Compression.
type Compression interface {
Compressor
Decompressor
}
// Compressor represents the helper methods to be used for creating eStargz.
type Compressor interface {
// Writer returns WriteCloser to be used for writing a chunk to eStargz.
// Every time a chunk is written, the WriteCloser is closed and Writer is
// called again for writing the next chunk.
//
// The returned writer should implement "Flush() error" function that flushes
// any pending compressed data to the underlying writer.
Writer(w io.Writer) (WriteFlushCloser, error)
// WriteTOCAndFooter is called to write the JTOC to the passed Writer.
// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob.
// WriteTOCAndFooter can optionally write anything that affects the DiffID
// calculation (e.g. uncompressed TOC JSON).
//
// This function returns tocDgst that represents the digest of TOC that will be used
// to verify this blob when it's parsed.
WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
}
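// A minimal sketch of the Writer half of a gzip-based Compressor: *gzip.Writer
// already provides Write, Flush() error, and Close, so it satisfies
// WriteFlushCloser directly. The gzipCompressor type here is hypothetical and
// "compress/gzip" is assumed to be imported:
//
//	type gzipCompressor struct{ level int }
//
//	func (gc gzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) {
//		return gzip.NewWriterLevel(w, gc.level)
//	}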
// Decompressor represents the helper methods to be used for parsing eStargz.
type Decompressor interface {
// Reader returns ReadCloser to be used for decompressing file payload.
Reader(r io.Reader) (io.ReadCloser, error)
// FooterSize returns the size of the footer of this blob.
FooterSize() int64
// ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
// payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
// the top until the TOC JSON).
//
// If tocOffset < 0, we assume that the TOC isn't contained in the blob and pass a nil
// reader to ParseTOC. ParseTOC is then expected to acquire the TOC from an external
// location and return it.
//
// tocSize is optional. If tocSize <= 0, it defaults to the size of the range from
// tocOffset until the beginning of the footer (blob size - tocOffset - FooterSize).
// If blobPayloadSize < 0, blobPayloadSize becomes the blob size.
ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
// of the underlying blob that has the range specified by ParseFooter method.
//
// This function returns tocDgst that represents the digest of TOC that will be used
// to verify this blob. This must match to the value returned from
// Compressor.WriteTOCAndFooter that is used when creating this blob.
//
// If the tocOffset returned by ParseFooter is < 0, we assume that the TOC isn't
// contained in the blob. A nil reader is passed to ParseTOC, which is then expected
// to acquire the TOC from an external location and return it.
ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
}
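// Putting the Decompressor contract together, a hedged sketch of locating and
// parsing the TOC from a blob exposed as an io.ReaderAt of known size (d is
// some Decompressor implementation; error handling elided):
//
//	footer := make([]byte, d.FooterSize())
//	if _, err := blob.ReadAt(footer, blobSize-d.FooterSize()); err != nil {
//		// handle short read
//	}
//	_, tocOff, tocSize, _ := d.ParseFooter(footer)
//	if tocSize <= 0 {
//		tocSize = blobSize - tocOff - d.FooterSize()
//	}
//	// Per the contract above, pass a nil reader when tocOff < 0 and let
//	// ParseTOC fetch the TOC from an external location.
//	toc, tocDgst, _ := d.ParseTOC(io.NewSectionReader(blob, tocOff, tocSize))
//	// ... verify tocDgst, then use toc.Entries ...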
type WriteFlushCloser interface {
io.WriteCloser
Flush() error
}

View File

@@ -154,40 +154,65 @@ var supportedAlgorithms = map[string]bool{
EdDSA: true,
}
// ProviderConfig allows creating providers when discovery isn't supported. It's
// generally easier to use NewProvider directly.
// ProviderConfig allows direct creation of a [Provider] from metadata
// configuration. This is intended for interop with providers that don't support
// discovery, or host the JSON discovery document at an off-spec path.
//
// The ProviderConfig struct specifies JSON struct tags to support document
// parsing.
//
// // Directly fetch the metadata document.
// resp, err := http.Get("https://login.example.com/custom-metadata-path")
// if err != nil {
// // ...
// }
// defer resp.Body.Close()
//
// // Parse config from JSON metadata.
// config := &oidc.ProviderConfig{}
// if err := json.NewDecoder(resp.Body).Decode(config); err != nil {
// // ...
// }
// p := config.NewProvider(context.Background())
//
// For providers that implement discovery, use [NewProvider] instead.
//
// See: https://openid.net/specs/openid-connect-discovery-1_0.html
type ProviderConfig struct {
// IssuerURL is the identity of the provider, and the string it uses to sign
// ID tokens with. For example "https://accounts.google.com". This value MUST
// match ID tokens exactly.
IssuerURL string
IssuerURL string `json:"issuer"`
// AuthURL is the endpoint used by the provider to support the OAuth 2.0
// authorization endpoint.
AuthURL string
AuthURL string `json:"authorization_endpoint"`
// TokenURL is the endpoint used by the provider to support the OAuth 2.0
// token endpoint.
TokenURL string
TokenURL string `json:"token_endpoint"`
// DeviceAuthURL is the endpoint used by the provider to support the OAuth 2.0
// device authorization endpoint.
DeviceAuthURL string
DeviceAuthURL string `json:"device_authorization_endpoint"`
// UserInfoURL is the endpoint used by the provider to support the OpenID
// Connect UserInfo flow.
//
// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
UserInfoURL string
UserInfoURL string `json:"userinfo_endpoint"`
// JWKSURL is the endpoint used by the provider to advertise public keys to
// verify issued ID tokens. This endpoint is polled as new keys are made
// available.
JWKSURL string
JWKSURL string `json:"jwks_uri"`
// Algorithms, if provided, indicate a list of JWT algorithms allowed to sign
// ID tokens. If not provided, this defaults to the algorithms advertised by
// the JWK endpoint, then the set of algorithms supported by this package.
Algorithms []string
Algorithms []string `json:"id_token_signing_alg_values_supported"`
}
// NewProvider initializes a provider from a set of endpoints, rather than
// through discovery.
//
// The provided context is only used for [http.Client] configuration through
// [ClientContext], not cancelation.
func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
return &Provider{
issuer: p.IssuerURL,
@@ -202,9 +227,14 @@ func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
}
// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
//
// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
// or "https://login.salesforce.com".
//
// OpenID Connect providers that don't implement discovery or host the discovery
// document at a non-spec compliant path (such as requiring a URL parameter),
// should use [ProviderConfig] instead.
//
// See: https://openid.net/specs/openid-connect-discovery-1_0.html
func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
req, err := http.NewRequest("GET", wellKnown, nil)

View File

@@ -6,6 +6,51 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased] ##
## [0.4.1] - 2025-01-28 ##
### Fixed ###
- The restriction added for `root` paths passed to `SecureJoin` in 0.4.0 was
found to be too strict and caused some regressions when folks tried to
update, so it has been relaxed to only return an error if the
path contains a `..` component. We still recommend users use `filepath.Clean`
(and even `filepath.EvalSymlinks`) on the `root` path they are using, but at
least you will no longer be punished for "trivial" unclean paths.
## [0.4.0] - 2025-01-13 ##
### Breaking ###
- `SecureJoin(VFS)` will now return an error if the provided `root` is not a
`filepath.Clean`'d path.
While it is ultimately the responsibility of the caller to ensure the root is
a safe path to use, passing a path like `/symlink/..` as a root would result
in the `SecureJoin`'d path being placed in `/` even though `/symlink/..`
might be a different directory, and so we should more strongly discourage
such usage.
All major users of `securejoin.SecureJoin` already ensure that the paths they
provide are safe (and this is ultimately a question of user error), but
removing this foot-gun is probably a good idea. Of course, this is
necessarily a breaking API change (though we expect no real users to be
affected by it).
Thanks to [Erik Sjölund](https://github.com/eriksjolund), who initially
reported this issue as a possible security issue.
- `MkdirAll` and `MkdirHandle` now take an `os.FileMode`-style mode argument
instead of a raw `unix.S_*`-style mode argument, which may cause compile-time
type errors depending on how you use `filepath-securejoin`. For most users,
there will be no change in behaviour aside from the type change (as the
bottom `0o777` bits are the same in both formats, and most users are probably
only using those bits).
However, if you were using `unix.S_ISVTX` to set the sticky bit with
`MkdirAll(Handle)` you will need to switch to `os.ModeSticky` otherwise you
will get a runtime error with this update (see the sketch below). In addition, the error message you
will get from passing `unix.S_ISUID` and `unix.S_ISGID` will be different as
they are treated as invalid bits now (note that previously passing said bits
was also an error).
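For illustration, a hedged before/after sketch of the mode argument change
(assuming the pre-0.4.0 signature took raw `unix.S_*` bits, per the note
above; exact call sites will vary):

    // 0.3.x (raw unix.S_* bits):
    err := securejoin.MkdirAll(root, "a/b/c", 0o755|unix.S_ISVTX)
    // 0.4.0+ (os.FileMode bits):
    err := securejoin.MkdirAll(root, "a/b/c", 0o755|os.ModeSticky)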
## [0.3.6] - 2024-12-17 ##
### Compatibility ###
@@ -193,7 +238,9 @@ This is our first release of `github.com/cyphar/filepath-securejoin`,
containing a full implementation with a coverage of 93.5% (the only missing
cases are the error cases, which are hard to mocktest at the moment).
[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...HEAD
[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...HEAD
[0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1
[0.4.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...v0.4.0
[0.3.6]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.5...v0.3.6
[0.3.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.4...v0.3.5
[0.3.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.3...v0.3.4

View File

@@ -1 +1 @@
0.3.6
0.4.1

View File

@@ -1,5 +1,5 @@
// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
// Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
// Copyright (C) 2017-2025 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -24,6 +24,31 @@ func IsNotExist(err error) bool {
return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT)
}
// errUnsafeRoot is returned if the user provides SecureJoinVFS with a path
// that contains ".." components.
var errUnsafeRoot = errors.New("root path provided to SecureJoin contains '..' components")
// stripVolume just gets rid of the Windows volume included in a path. Based on
// some godbolt tests, the Go compiler is smart enough to make this a no-op on
// Linux.
func stripVolume(path string) string {
return path[len(filepath.VolumeName(path)):]
}
// hasDotDot checks if the path contains ".." components in a platform-agnostic
// way.
func hasDotDot(path string) bool {
// If we are on Windows, strip any volume letters. It turns out that
// C:..\foo may (or may not) be a valid pathname and we need to handle that
// leading "..".
path = stripVolume(path)
// Look for "/../" in the path, but we need to handle leading and trailing
// ".."s by adding separators. Doing this with filepath.Separator is ugly
// so just convert to Unix-style "/" first.
path = filepath.ToSlash(path)
return strings.Contains("/"+path+"/", "/../")
}
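// As a usage sketch (paths are hypothetical; the exported SecureJoin wrapper
// calls SecureJoinVFS with a nil VFS), where hasDotDot guards the root itself:
//
//	path, err := securejoin.SecureJoin("/var/lib/app", untrustedInput)
//	if err != nil {
//		// e.g. errUnsafeRoot if the root contained ".." components
//	}
//	// path is guaranteed to be scoped under /var/lib/app, subject to the
//	// caveats documented on SecureJoinVFS.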
// SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except
// that the returned path is guaranteed to be scoped inside the provided root
// path (when evaluated). Any symbolic links in the path are evaluated with the
@@ -46,7 +71,22 @@ func IsNotExist(err error) bool {
// provided via direct input or when evaluating symlinks. Therefore:
//
// "C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt"
//
// If the provided root contains ".." components then an error will be
// returned, as such root paths are inherently unsafe and using such paths is
// not best practice. We also strongly suggest that any root path is first
// fully resolved using [filepath.EvalSymlinks] or otherwise constructed to
// avoid containing symlink components. Of course, the root also *must not* be
// attacker-controlled.
func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
// The root path must not contain ".." components, otherwise when we join
// the subpath we will end up with a weird path. We could work around this
// in other ways but users shouldn't be giving us non-lexical root paths in
// the first place.
if hasDotDot(root) {
return "", errUnsafeRoot
}
// Use the os.* VFS implementation if none was specified.
if vfs == nil {
vfs = osVFS{}
@@ -59,9 +99,10 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
linksWalked int
)
for remainingPath != "" {
if v := filepath.VolumeName(remainingPath); v != "" {
remainingPath = remainingPath[len(v):]
}
// On Windows, if we managed to end up at a path referencing a volume,
// drop the volume to make sure we don't end up with broken paths or
// escaping the root volume.
remainingPath = stripVolume(remainingPath)
// Get the next path component.
var part string

Some files were not shown because too many files have changed in this diff