Initial euclide.org release
vendor/github.com/Azure/go-ntlmssp/.travis.yml | 17 (generated, vendored, Normal file)
@@ -0,0 +1,17 @@
sudo: false

language: go

before_script:
  - go get -u golang.org/x/lint/golint

go:
  - 1.10.x
  - master

script:
  - test -z "$(gofmt -s -l . | tee /dev/stderr)"
  - test -z "$(golint ./... | tee /dev/stderr)"
  - go vet ./...
  - go build -v ./...
  - go test -v ./...
vendor/github.com/Azure/go-ntlmssp/LICENSE | 21 (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2016 Microsoft

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vendor/github.com/Azure/go-ntlmssp/README.md | 29 (generated, vendored, Normal file)
@@ -0,0 +1,29 @@
# go-ntlmssp
Golang package that provides NTLM/Negotiate authentication over HTTP

[](https://godoc.org/github.com/Azure/go-ntlmssp) [](https://travis-ci.org/Azure/go-ntlmssp)

Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx
Implementation hints from http://davenport.sourceforge.net/ntlm.html

This package only implements authentication, no key exchange or encryption. It
only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding.
This package implements NTLMv2.

# Usage

```
url, user, password := "http://www.example.com/secrets", "robpike", "pw123"
client := &http.Client{
    Transport: ntlmssp.Negotiator{
        RoundTripper: &http.Transport{},
    },
}

req, _ := http.NewRequest("GET", url, nil)
req.SetBasicAuth(user, password)
res, _ := client.Do(req)
```

-----
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
vendor/github.com/Azure/go-ntlmssp/SECURITY.md | 41 (generated, vendored, Normal file)
@@ -0,0 +1,41 @@
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.8 BLOCK -->

## Security

Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).

If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.

## Reporting Security Issues

**Please do not report security vulnerabilities through public GitHub issues.**

Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).

If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).

You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).

Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:

* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue

This information will help us triage your report more quickly.

If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.

## Preferred Languages

We prefer all communications to be in English.

## Policy

Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).

<!-- END MICROSOFT SECURITY.MD BLOCK -->
vendor/github.com/Azure/go-ntlmssp/authenticate_message.go | 187 (generated, vendored, Normal file)
@@ -0,0 +1,187 @@
package ntlmssp

import (
    "bytes"
    "crypto/rand"
    "encoding/binary"
    "encoding/hex"
    "errors"
    "strings"
    "time"
)

type authenicateMessage struct {
    LmChallengeResponse []byte
    NtChallengeResponse []byte

    TargetName string
    UserName   string

    // only set if negotiateFlag_NTLMSSP_NEGOTIATE_KEY_EXCH
    EncryptedRandomSessionKey []byte

    NegotiateFlags negotiateFlags

    MIC []byte
}

type authenticateMessageFields struct {
    messageHeader
    LmChallengeResponse varField
    NtChallengeResponse varField
    TargetName          varField
    UserName            varField
    Workstation         varField
    _                   [8]byte
    NegotiateFlags      negotiateFlags
}

func (m authenicateMessage) MarshalBinary() ([]byte, error) {
    if !m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE) {
        return nil, errors.New("Only unicode is supported")
    }

    target, user := toUnicode(m.TargetName), toUnicode(m.UserName)
    workstation := toUnicode("")

    ptr := binary.Size(&authenticateMessageFields{})
    f := authenticateMessageFields{
        messageHeader:       newMessageHeader(3),
        NegotiateFlags:      m.NegotiateFlags,
        LmChallengeResponse: newVarField(&ptr, len(m.LmChallengeResponse)),
        NtChallengeResponse: newVarField(&ptr, len(m.NtChallengeResponse)),
        TargetName:          newVarField(&ptr, len(target)),
        UserName:            newVarField(&ptr, len(user)),
        Workstation:         newVarField(&ptr, len(workstation)),
    }

    f.NegotiateFlags.Unset(negotiateFlagNTLMSSPNEGOTIATEVERSION)

    b := bytes.Buffer{}
    if err := binary.Write(&b, binary.LittleEndian, &f); err != nil {
        return nil, err
    }
    if err := binary.Write(&b, binary.LittleEndian, &m.LmChallengeResponse); err != nil {
        return nil, err
    }
    if err := binary.Write(&b, binary.LittleEndian, &m.NtChallengeResponse); err != nil {
        return nil, err
    }
    if err := binary.Write(&b, binary.LittleEndian, &target); err != nil {
        return nil, err
    }
    if err := binary.Write(&b, binary.LittleEndian, &user); err != nil {
        return nil, err
    }
    if err := binary.Write(&b, binary.LittleEndian, &workstation); err != nil {
        return nil, err
    }

    return b.Bytes(), nil
}

//ProcessChallenge crafts an AUTHENTICATE message in response to the CHALLENGE message
//that was received from the server
func ProcessChallenge(challengeMessageData []byte, user, password string, domainNeeded bool) ([]byte, error) {
    if user == "" && password == "" {
        return nil, errors.New("Anonymous authentication not supported")
    }

    var cm challengeMessage
    if err := cm.UnmarshalBinary(challengeMessageData); err != nil {
        return nil, err
    }

    if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) {
        return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)")
    }
    if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) {
        return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)")
    }

    if !domainNeeded {
        cm.TargetName = ""
    }

    am := authenicateMessage{
        UserName:       user,
        TargetName:     cm.TargetName,
        NegotiateFlags: cm.NegotiateFlags,
    }

    timestamp := cm.TargetInfo[avIDMsvAvTimestamp]
    if timestamp == nil { // no time sent, take current time
        ft := uint64(time.Now().UnixNano()) / 100
        ft += 116444736000000000 // add time between unix & windows offset
        timestamp = make([]byte, 8)
        binary.LittleEndian.PutUint64(timestamp, ft)
    }

    clientChallenge := make([]byte, 8)
    rand.Reader.Read(clientChallenge)

    ntlmV2Hash := getNtlmV2Hash(password, user, cm.TargetName)

    am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash,
        cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw)

    if cm.TargetInfoRaw == nil {
        am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash,
            cm.ServerChallenge[:], clientChallenge)
    }
    return am.MarshalBinary()
}

func ProcessChallengeWithHash(challengeMessageData []byte, user, hash string) ([]byte, error) {
    if user == "" && hash == "" {
        return nil, errors.New("Anonymous authentication not supported")
    }

    var cm challengeMessage
    if err := cm.UnmarshalBinary(challengeMessageData); err != nil {
        return nil, err
    }

    if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) {
        return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)")
    }
    if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) {
        return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)")
    }

    am := authenicateMessage{
        UserName:       user,
        TargetName:     cm.TargetName,
        NegotiateFlags: cm.NegotiateFlags,
    }

    timestamp := cm.TargetInfo[avIDMsvAvTimestamp]
    if timestamp == nil { // no time sent, take current time
        ft := uint64(time.Now().UnixNano()) / 100
        ft += 116444736000000000 // add time between unix & windows offset
        timestamp = make([]byte, 8)
        binary.LittleEndian.PutUint64(timestamp, ft)
    }

    clientChallenge := make([]byte, 8)
    rand.Reader.Read(clientChallenge)

    hashParts := strings.Split(hash, ":")
    if len(hashParts) > 1 {
        hash = hashParts[1]
    }
    hashBytes, err := hex.DecodeString(hash)
    if err != nil {
        return nil, err
    }
    ntlmV2Hash := hmacMd5(hashBytes, toUnicode(strings.ToUpper(user)+cm.TargetName))

    am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash,
        cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw)

    if cm.TargetInfoRaw == nil {
        am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash,
            cm.ServerChallenge[:], clientChallenge)
    }
    return am.MarshalBinary()
}
vendor/github.com/Azure/go-ntlmssp/authheader.go | 66 (generated, vendored, Normal file)
@@ -0,0 +1,66 @@
package ntlmssp

import (
    "encoding/base64"
    "strings"
)

type authheader []string

func (h authheader) IsBasic() bool {
    for _, s := range h {
        if strings.HasPrefix(string(s), "Basic ") {
            return true
        }
    }
    return false
}

func (h authheader) Basic() string {
    for _, s := range h {
        if strings.HasPrefix(string(s), "Basic ") {
            return s
        }
    }
    return ""
}

func (h authheader) IsNegotiate() bool {
    for _, s := range h {
        if strings.HasPrefix(string(s), "Negotiate") {
            return true
        }
    }
    return false
}

func (h authheader) IsNTLM() bool {
    for _, s := range h {
        if strings.HasPrefix(string(s), "NTLM") {
            return true
        }
    }
    return false
}

func (h authheader) GetData() ([]byte, error) {
    for _, s := range h {
        if strings.HasPrefix(string(s), "NTLM") || strings.HasPrefix(string(s), "Negotiate") || strings.HasPrefix(string(s), "Basic ") {
            p := strings.Split(string(s), " ")
            if len(p) < 2 {
                return nil, nil
            }
            return base64.StdEncoding.DecodeString(string(p[1]))
        }
    }
    return nil, nil
}

func (h authheader) GetBasicCreds() (username, password string, err error) {
    d, err := h.GetData()
    if err != nil {
        return "", "", err
    }
    parts := strings.SplitN(string(d), ":", 2)
    return parts[0], parts[1], nil
}
vendor/github.com/Azure/go-ntlmssp/avids.go | 17 (generated, vendored, Normal file)
@@ -0,0 +1,17 @@
package ntlmssp

type avID uint16

const (
    avIDMsvAvEOL avID = iota
    avIDMsvAvNbComputerName
    avIDMsvAvNbDomainName
    avIDMsvAvDNSComputerName
    avIDMsvAvDNSDomainName
    avIDMsvAvDNSTreeName
    avIDMsvAvFlags
    avIDMsvAvTimestamp
    avIDMsvAvSingleHost
    avIDMsvAvTargetName
    avIDMsvChannelBindings
)
vendor/github.com/Azure/go-ntlmssp/challenge_message.go | 82 (generated, vendored, Normal file)
@@ -0,0 +1,82 @@
package ntlmssp

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

type challengeMessageFields struct {
    messageHeader
    TargetName      varField
    NegotiateFlags  negotiateFlags
    ServerChallenge [8]byte
    _               [8]byte
    TargetInfo      varField
}

func (m challengeMessageFields) IsValid() bool {
    return m.messageHeader.IsValid() && m.MessageType == 2
}

type challengeMessage struct {
    challengeMessageFields
    TargetName    string
    TargetInfo    map[avID][]byte
    TargetInfoRaw []byte
}

func (m *challengeMessage) UnmarshalBinary(data []byte) error {
    r := bytes.NewReader(data)
    err := binary.Read(r, binary.LittleEndian, &m.challengeMessageFields)
    if err != nil {
        return err
    }
    if !m.challengeMessageFields.IsValid() {
        return fmt.Errorf("Message is not a valid challenge message: %+v", m.challengeMessageFields.messageHeader)
    }

    if m.challengeMessageFields.TargetName.Len > 0 {
        m.TargetName, err = m.challengeMessageFields.TargetName.ReadStringFrom(data, m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE))
        if err != nil {
            return err
        }
    }

    if m.challengeMessageFields.TargetInfo.Len > 0 {
        d, err := m.challengeMessageFields.TargetInfo.ReadFrom(data)
        m.TargetInfoRaw = d
        if err != nil {
            return err
        }
        m.TargetInfo = make(map[avID][]byte)
        r := bytes.NewReader(d)
        for {
            var id avID
            var l uint16
            err = binary.Read(r, binary.LittleEndian, &id)
            if err != nil {
                return err
            }
            if id == avIDMsvAvEOL {
                break
            }

            err = binary.Read(r, binary.LittleEndian, &l)
            if err != nil {
                return err
            }
            value := make([]byte, l)
            n, err := r.Read(value)
            if err != nil {
                return err
            }
            if n != int(l) {
                return fmt.Errorf("Expected to read %d bytes, got only %d", l, n)
            }
            m.TargetInfo[id] = value
        }
    }

    return nil
}
vendor/github.com/Azure/go-ntlmssp/messageheader.go | 21 (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
package ntlmssp

import (
    "bytes"
)

var signature = [8]byte{'N', 'T', 'L', 'M', 'S', 'S', 'P', 0}

type messageHeader struct {
    Signature   [8]byte
    MessageType uint32
}

func (h messageHeader) IsValid() bool {
    return bytes.Equal(h.Signature[:], signature[:]) &&
        h.MessageType > 0 && h.MessageType < 4
}

func newMessageHeader(messageType uint32) messageHeader {
    return messageHeader{signature, messageType}
}
vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go | 52 (generated, vendored, Normal file)
@@ -0,0 +1,52 @@
package ntlmssp

type negotiateFlags uint32

const (
    /*A*/ negotiateFlagNTLMSSPNEGOTIATEUNICODE negotiateFlags = 1 << 0
    /*B*/ negotiateFlagNTLMNEGOTIATEOEM = 1 << 1
    /*C*/ negotiateFlagNTLMSSPREQUESTTARGET = 1 << 2

    /*D*/
    negotiateFlagNTLMSSPNEGOTIATESIGN = 1 << 4
    /*E*/ negotiateFlagNTLMSSPNEGOTIATESEAL = 1 << 5
    /*F*/ negotiateFlagNTLMSSPNEGOTIATEDATAGRAM = 1 << 6
    /*G*/ negotiateFlagNTLMSSPNEGOTIATELMKEY = 1 << 7

    /*H*/
    negotiateFlagNTLMSSPNEGOTIATENTLM = 1 << 9

    /*J*/
    negotiateFlagANONYMOUS = 1 << 11
    /*K*/ negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED = 1 << 12
    /*L*/ negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED = 1 << 13

    /*M*/
    negotiateFlagNTLMSSPNEGOTIATEALWAYSSIGN = 1 << 15
    /*N*/ negotiateFlagNTLMSSPTARGETTYPEDOMAIN = 1 << 16
    /*O*/ negotiateFlagNTLMSSPTARGETTYPESERVER = 1 << 17

    /*P*/
    negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY = 1 << 19
    /*Q*/ negotiateFlagNTLMSSPNEGOTIATEIDENTIFY = 1 << 20

    /*R*/
    negotiateFlagNTLMSSPREQUESTNONNTSESSIONKEY = 1 << 22
    /*S*/ negotiateFlagNTLMSSPNEGOTIATETARGETINFO = 1 << 23

    /*T*/
    negotiateFlagNTLMSSPNEGOTIATEVERSION = 1 << 25

    /*U*/
    negotiateFlagNTLMSSPNEGOTIATE128 = 1 << 29
    /*V*/ negotiateFlagNTLMSSPNEGOTIATEKEYEXCH = 1 << 30
    /*W*/ negotiateFlagNTLMSSPNEGOTIATE56 = 1 << 31
)

func (field negotiateFlags) Has(flags negotiateFlags) bool {
    return field&flags == flags
}

func (field *negotiateFlags) Unset(flags negotiateFlags) {
    *field = *field ^ (*field & flags)
}
vendor/github.com/Azure/go-ntlmssp/negotiate_message.go | 64 (generated, vendored, Normal file)
@@ -0,0 +1,64 @@
package ntlmssp

import (
    "bytes"
    "encoding/binary"
    "errors"
    "strings"
)

const expMsgBodyLen = 40

type negotiateMessageFields struct {
    messageHeader
    NegotiateFlags negotiateFlags

    Domain      varField
    Workstation varField

    Version
}

var defaultFlags = negotiateFlagNTLMSSPNEGOTIATETARGETINFO |
    negotiateFlagNTLMSSPNEGOTIATE56 |
    negotiateFlagNTLMSSPNEGOTIATE128 |
    negotiateFlagNTLMSSPNEGOTIATEUNICODE |
    negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY

//NewNegotiateMessage creates a new NEGOTIATE message with the
//flags that this package supports.
func NewNegotiateMessage(domainName, workstationName string) ([]byte, error) {
    payloadOffset := expMsgBodyLen
    flags := defaultFlags

    if domainName != "" {
        flags |= negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED
    }

    if workstationName != "" {
        flags |= negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED
    }

    msg := negotiateMessageFields{
        messageHeader:  newMessageHeader(1),
        NegotiateFlags: flags,
        Domain:         newVarField(&payloadOffset, len(domainName)),
        Workstation:    newVarField(&payloadOffset, len(workstationName)),
        Version:        DefaultVersion(),
    }

    b := bytes.Buffer{}
    if err := binary.Write(&b, binary.LittleEndian, &msg); err != nil {
        return nil, err
    }
    if b.Len() != expMsgBodyLen {
        return nil, errors.New("incorrect body length")
    }

    payload := strings.ToUpper(domainName + workstationName)
    if _, err := b.WriteString(payload); err != nil {
        return nil, err
    }

    return b.Bytes(), nil
}
vendor/github.com/Azure/go-ntlmssp/negotiator.go | 151 (generated, vendored, Normal file)
@@ -0,0 +1,151 @@
package ntlmssp

import (
    "bytes"
    "encoding/base64"
    "io"
    "io/ioutil"
    "net/http"
    "strings"
)

// GetDomain : parse domain name from based on slashes in the input
// Need to check for upn as well
func GetDomain(user string) (string, string, bool) {
    domain := ""
    domainNeeded := false

    if strings.Contains(user, "\\") {
        ucomponents := strings.SplitN(user, "\\", 2)
        domain = ucomponents[0]
        user = ucomponents[1]
        domainNeeded = true
    } else if strings.Contains(user, "@") {
        domainNeeded = false
    } else {
        domainNeeded = true
    }
    return user, domain, domainNeeded
}

//Negotiator is a http.Roundtripper decorator that automatically
//converts basic authentication to NTLM/Negotiate authentication when appropriate.
type Negotiator struct{ http.RoundTripper }

//RoundTrip sends the request to the server, handling any authentication
//re-sends as needed.
func (l Negotiator) RoundTrip(req *http.Request) (res *http.Response, err error) {
    // Use default round tripper if not provided
    rt := l.RoundTripper
    if rt == nil {
        rt = http.DefaultTransport
    }
    // If it is not basic auth, just round trip the request as usual
    reqauth := authheader(req.Header.Values("Authorization"))
    if !reqauth.IsBasic() {
        return rt.RoundTrip(req)
    }
    reqauthBasic := reqauth.Basic()
    // Save request body
    body := bytes.Buffer{}
    if req.Body != nil {
        _, err = body.ReadFrom(req.Body)
        if err != nil {
            return nil, err
        }

        req.Body.Close()
        req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
    }
    // first try anonymous, in case the server still finds us
    // authenticated from previous traffic
    req.Header.Del("Authorization")
    res, err = rt.RoundTrip(req)
    if err != nil {
        return nil, err
    }
    if res.StatusCode != http.StatusUnauthorized {
        return res, err
    }
    resauth := authheader(res.Header.Values("Www-Authenticate"))
    if !resauth.IsNegotiate() && !resauth.IsNTLM() {
        // Unauthorized, Negotiate not requested, let's try with basic auth
        req.Header.Set("Authorization", string(reqauthBasic))
        io.Copy(ioutil.Discard, res.Body)
        res.Body.Close()
        req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))

        res, err = rt.RoundTrip(req)
        if err != nil {
            return nil, err
        }
        if res.StatusCode != http.StatusUnauthorized {
            return res, err
        }
        resauth = authheader(res.Header.Values("Www-Authenticate"))
    }

    if resauth.IsNegotiate() || resauth.IsNTLM() {
        // 401 with request:Basic and response:Negotiate
        io.Copy(ioutil.Discard, res.Body)
        res.Body.Close()

        // recycle credentials
        u, p, err := reqauth.GetBasicCreds()
        if err != nil {
            return nil, err
        }

        // get domain from username
        domain := ""
        u, domain, domainNeeded := GetDomain(u)

        // send negotiate
        negotiateMessage, err := NewNegotiateMessage(domain, "")
        if err != nil {
            return nil, err
        }
        if resauth.IsNTLM() {
            req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(negotiateMessage))
        } else {
            req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(negotiateMessage))
        }

        req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))

        res, err = rt.RoundTrip(req)
        if err != nil {
            return nil, err
        }

        // receive challenge?
        resauth = authheader(res.Header.Values("Www-Authenticate"))
        challengeMessage, err := resauth.GetData()
        if err != nil {
            return nil, err
        }
        if !(resauth.IsNegotiate() || resauth.IsNTLM()) || len(challengeMessage) == 0 {
            // Negotiation failed, let client deal with response
            return res, nil
        }
        io.Copy(ioutil.Discard, res.Body)
        res.Body.Close()

        // send authenticate
        authenticateMessage, err := ProcessChallenge(challengeMessage, u, p, domainNeeded)
        if err != nil {
            return nil, err
        }
        if resauth.IsNTLM() {
            req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(authenticateMessage))
        } else {
            req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(authenticateMessage))
        }

        req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))

        return rt.RoundTrip(req)
    }

    return res, err
}
vendor/github.com/Azure/go-ntlmssp/nlmp.go | 51 (generated, vendored, Normal file)
@@ -0,0 +1,51 @@
// Package ntlmssp provides NTLM/Negotiate authentication over HTTP
//
// Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx,
// implementation hints from http://davenport.sourceforge.net/ntlm.html .
// This package only implements authentication, no key exchange or encryption. It
// only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding.
// This package implements NTLMv2.
package ntlmssp

import (
    "crypto/hmac"
    "crypto/md5"
    "golang.org/x/crypto/md4"
    "strings"
)

func getNtlmV2Hash(password, username, target string) []byte {
    return hmacMd5(getNtlmHash(password), toUnicode(strings.ToUpper(username)+target))
}

func getNtlmHash(password string) []byte {
    hash := md4.New()
    hash.Write(toUnicode(password))
    return hash.Sum(nil)
}

func computeNtlmV2Response(ntlmV2Hash, serverChallenge, clientChallenge,
    timestamp, targetInfo []byte) []byte {

    temp := []byte{1, 1, 0, 0, 0, 0, 0, 0}
    temp = append(temp, timestamp...)
    temp = append(temp, clientChallenge...)
    temp = append(temp, 0, 0, 0, 0)
    temp = append(temp, targetInfo...)
    temp = append(temp, 0, 0, 0, 0)

    NTProofStr := hmacMd5(ntlmV2Hash, serverChallenge, temp)
    return append(NTProofStr, temp...)
}

func computeLmV2Response(ntlmV2Hash, serverChallenge, clientChallenge []byte) []byte {
    return append(hmacMd5(ntlmV2Hash, serverChallenge, clientChallenge), clientChallenge...)
}

func hmacMd5(key []byte, data ...[]byte) []byte {
    mac := hmac.New(md5.New, key)
    for _, d := range data {
        mac.Write(d)
    }
    return mac.Sum(nil)
}
vendor/github.com/Azure/go-ntlmssp/unicode.go | 29 (generated, vendored, Normal file)
@@ -0,0 +1,29 @@
package ntlmssp

import (
    "bytes"
    "encoding/binary"
    "errors"
    "unicode/utf16"
)

// helper func's for dealing with Windows Unicode (UTF16LE)

func fromUnicode(d []byte) (string, error) {
    if len(d)%2 > 0 {
        return "", errors.New("Unicode (UTF 16 LE) specified, but uneven data length")
    }
    s := make([]uint16, len(d)/2)
    err := binary.Read(bytes.NewReader(d), binary.LittleEndian, &s)
    if err != nil {
        return "", err
    }
    return string(utf16.Decode(s)), nil
}

func toUnicode(s string) []byte {
    uints := utf16.Encode([]rune(s))
    b := bytes.Buffer{}
    binary.Write(&b, binary.LittleEndian, &uints)
    return b.Bytes()
}
vendor/github.com/Azure/go-ntlmssp/varfield.go | 40 (generated, vendored, Normal file)
@@ -0,0 +1,40 @@
package ntlmssp

import (
    "errors"
)

type varField struct {
    Len          uint16
    MaxLen       uint16
    BufferOffset uint32
}

func (f varField) ReadFrom(buffer []byte) ([]byte, error) {
    if len(buffer) < int(f.BufferOffset+uint32(f.Len)) {
        return nil, errors.New("Error reading data, varField extends beyond buffer")
    }
    return buffer[f.BufferOffset : f.BufferOffset+uint32(f.Len)], nil
}

func (f varField) ReadStringFrom(buffer []byte, unicode bool) (string, error) {
    d, err := f.ReadFrom(buffer)
    if err != nil {
        return "", err
    }
    if unicode { // UTF-16LE encoding scheme
        return fromUnicode(d)
    }
    // OEM encoding, close enough to ASCII, since no code page is specified
    return string(d), err
}

func newVarField(ptr *int, fieldsize int) varField {
    f := varField{
        Len:          uint16(fieldsize),
        MaxLen:       uint16(fieldsize),
        BufferOffset: uint32(*ptr),
    }
    *ptr += fieldsize
    return f
}
vendor/github.com/Azure/go-ntlmssp/version.go | 20 (generated, vendored, Normal file)
@@ -0,0 +1,20 @@
package ntlmssp

// Version is a struct representing https://msdn.microsoft.com/en-us/library/cc236654.aspx
type Version struct {
    ProductMajorVersion uint8
    ProductMinorVersion uint8
    ProductBuild        uint16
    _                   [3]byte
    NTLMRevisionCurrent uint8
}

// DefaultVersion returns a Version with "sensible" defaults (Windows 7)
func DefaultVersion() Version {
    return Version{
        ProductMajorVersion: 6,
        ProductMinorVersion: 1,
        ProductBuild:        7601,
        NTLMRevisionCurrent: 15,
    }
}
vendor/github.com/go-asn1-ber/asn1-ber/LICENSE | 22 (generated, vendored, Normal file)
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
Portions copyright (c) 2015-2016 go-asn1-ber Authors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vendor/github.com/go-asn1-ber/asn1-ber/README.md | 24 (generated, vendored, Normal file)
@@ -0,0 +1,24 @@
[](https://godoc.org/gopkg.in/asn1-ber.v1) [](https://travis-ci.org/go-asn1-ber/asn1-ber)

ASN1 BER Encoding / Decoding Library for the GO programming language.
---------------------------------------------------------------------

Required libraries:
   None

Working:
   Very basic encoding / decoding needed for LDAP protocol

Tests Implemented:
   A few

TODO:
   Fix all encoding / decoding to conform to ASN1 BER spec
   Implement Tests / Benchmarks

---

The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
The design is licensed under the Creative Commons 3.0 Attributions license.
Read this article for more details: http://blog.golang.org/gopher
vendor/github.com/go-asn1-ber/asn1-ber/ber.go | 625 (generated, vendored, Normal file)
@@ -0,0 +1,625 @@
package ber

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "math"
    "os"
    "reflect"
    "time"
    "unicode/utf8"
)

// MaxPacketLengthBytes specifies the maximum allowed packet size when calling ReadPacket or DecodePacket. Set to 0 for
// no limit.
var MaxPacketLengthBytes int64 = math.MaxInt32

type Packet struct {
    Identifier
    Value       interface{}
    ByteValue   []byte
    Data        *bytes.Buffer
    Children    []*Packet
    Description string
}

type Identifier struct {
    ClassType Class
    TagType   Type
    Tag       Tag
}

type Tag uint64

const (
    TagEOC              Tag = 0x00
    TagBoolean          Tag = 0x01
    TagInteger          Tag = 0x02
    TagBitString        Tag = 0x03
    TagOctetString      Tag = 0x04
    TagNULL             Tag = 0x05
    TagObjectIdentifier Tag = 0x06
    TagObjectDescriptor Tag = 0x07
    TagExternal         Tag = 0x08
    TagRealFloat        Tag = 0x09
    TagEnumerated       Tag = 0x0a
    TagEmbeddedPDV      Tag = 0x0b
    TagUTF8String       Tag = 0x0c
    TagRelativeOID      Tag = 0x0d
    TagSequence         Tag = 0x10
    TagSet              Tag = 0x11
    TagNumericString    Tag = 0x12
    TagPrintableString  Tag = 0x13
    TagT61String        Tag = 0x14
    TagVideotexString   Tag = 0x15
    TagIA5String        Tag = 0x16
    TagUTCTime          Tag = 0x17
    TagGeneralizedTime  Tag = 0x18
    TagGraphicString    Tag = 0x19
    TagVisibleString    Tag = 0x1a
    TagGeneralString    Tag = 0x1b
    TagUniversalString  Tag = 0x1c
    TagCharacterString  Tag = 0x1d
    TagBMPString        Tag = 0x1e
    TagBitmask          Tag = 0x1f // xxx11111b

    // HighTag indicates the start of a high-tag byte sequence
    HighTag Tag = 0x1f // xxx11111b
    // HighTagContinueBitmask indicates the high-tag byte sequence should continue
    HighTagContinueBitmask Tag = 0x80 // 10000000b
    // HighTagValueBitmask obtains the tag value from a high-tag byte sequence byte
    HighTagValueBitmask Tag = 0x7f // 01111111b
)

const (
    // LengthLongFormBitmask is the mask to apply to the length byte to see if a long-form byte sequence is used
    LengthLongFormBitmask = 0x80
    // LengthValueBitmask is the mask to apply to the length byte to get the number of bytes in the long-form byte sequence
    LengthValueBitmask = 0x7f

    // LengthIndefinite is returned from readLength to indicate an indefinite length
    LengthIndefinite = -1
)

var tagMap = map[Tag]string{
    TagEOC:              "EOC (End-of-Content)",
    TagBoolean:          "Boolean",
    TagInteger:          "Integer",
    TagBitString:        "Bit String",
    TagOctetString:      "Octet String",
    TagNULL:             "NULL",
    TagObjectIdentifier: "Object Identifier",
    TagObjectDescriptor: "Object Descriptor",
    TagExternal:         "External",
    TagRealFloat:        "Real (float)",
    TagEnumerated:       "Enumerated",
    TagEmbeddedPDV:      "Embedded PDV",
    TagUTF8String:       "UTF8 String",
    TagRelativeOID:      "Relative-OID",
    TagSequence:         "Sequence and Sequence of",
    TagSet:              "Set and Set OF",
    TagNumericString:    "Numeric String",
    TagPrintableString:  "Printable String",
    TagT61String:        "T61 String",
    TagVideotexString:   "Videotex String",
    TagIA5String:        "IA5 String",
    TagUTCTime:          "UTC Time",
    TagGeneralizedTime:  "Generalized Time",
    TagGraphicString:    "Graphic String",
    TagVisibleString:    "Visible String",
    TagGeneralString:    "General String",
    TagUniversalString:  "Universal String",
    TagCharacterString:  "Character String",
    TagBMPString:        "BMP String",
}

type Class uint8

const (
    ClassUniversal   Class = 0   // 00xxxxxxb
    ClassApplication Class = 64  // 01xxxxxxb
    ClassContext     Class = 128 // 10xxxxxxb
    ClassPrivate     Class = 192 // 11xxxxxxb
    ClassBitmask     Class = 192 // 11xxxxxxb
)

var ClassMap = map[Class]string{
    ClassUniversal:   "Universal",
    ClassApplication: "Application",
    ClassContext:     "Context",
    ClassPrivate:     "Private",
}

type Type uint8

const (
    TypePrimitive   Type = 0  // xx0xxxxxb
    TypeConstructed Type = 32 // xx1xxxxxb
    TypeBitmask     Type = 32 // xx1xxxxxb
)

var TypeMap = map[Type]string{
    TypePrimitive:   "Primitive",
    TypeConstructed: "Constructed",
}

var Debug = false

func PrintBytes(out io.Writer, buf []byte, indent string) {
    dataLines := make([]string, (len(buf)/30)+1)
    numLines := make([]string, (len(buf)/30)+1)

    for i, b := range buf {
        dataLines[i/30] += fmt.Sprintf("%02x ", b)
        numLines[i/30] += fmt.Sprintf("%02d ", (i+1)%100)
    }

    for i := 0; i < len(dataLines); i++ {
        _, _ = out.Write([]byte(indent + dataLines[i] + "\n"))
        _, _ = out.Write([]byte(indent + numLines[i] + "\n\n"))
    }
}

func WritePacket(out io.Writer, p *Packet) {
    printPacket(out, p, 0, false)
}

func PrintPacket(p *Packet) {
    printPacket(os.Stdout, p, 0, false)
}

// Return a string describing packet content. This is not recursive,
// If the packet is a sequence, use `printPacket()`, or browse
// sequence yourself.
func DescribePacket(p *Packet) string {

    classStr := ClassMap[p.ClassType]

    tagTypeStr := TypeMap[p.TagType]

    tagStr := fmt.Sprintf("0x%02X", p.Tag)

    if p.ClassType == ClassUniversal {
        tagStr = tagMap[p.Tag]
    }

    value := fmt.Sprint(p.Value)
    description := ""

    if p.Description != "" {
        description = p.Description + ": "
    }

    return fmt.Sprintf("%s(%s, %s, %s) Len=%d %q", description, classStr, tagTypeStr, tagStr, p.Data.Len(), value)
}

func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) {
    indentStr := ""

    for len(indentStr) != indent {
        indentStr += " "
    }

    _, _ = fmt.Fprintf(out, "%s%s\n", indentStr, DescribePacket(p))

    if printBytes {
        PrintBytes(out, p.Bytes(), indentStr)
    }

    for _, child := range p.Children {
        printPacket(out, child, indent+1, printBytes)
    }
}

// ReadPacket reads a single Packet from the reader.
func ReadPacket(reader io.Reader) (*Packet, error) {
    p, _, err := readPacket(reader)
    if err != nil {
        return nil, err
    }
    return p, nil
}

func DecodeString(data []byte) string {
    return string(data)
}

func ParseInt64(bytes []byte) (ret int64, err error) {
    if len(bytes) > 8 {
        // We'll overflow an int64 in this case.
        err = fmt.Errorf("integer too large")
        return
    }
    for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
        ret <<= 8
        ret |= int64(bytes[bytesRead])
    }

    // Shift up and down in order to sign extend the result.
    ret <<= 64 - uint8(len(bytes))*8
    ret >>= 64 - uint8(len(bytes))*8
    return
}

func encodeInteger(i int64) []byte {
    n := int64Length(i)
    out := make([]byte, n)

    var j int
    for ; n > 0; n-- {
        out[j] = byte(i >> uint((n-1)*8))
        j++
    }

    return out
}

func int64Length(i int64) (numBytes int) {
    numBytes = 1

    for i > 127 {
        numBytes++
        i >>= 8
    }

    for i < -128 {
        numBytes++
        i >>= 8
    }

    return
}

// DecodePacket decodes the given bytes into a single Packet
// If a decode error is encountered, nil is returned.
func DecodePacket(data []byte) *Packet {
    p, _, _ := readPacket(bytes.NewBuffer(data))

    return p
}

// DecodePacketErr decodes the given bytes into a single Packet
// If a decode error is encountered, nil is returned.
func DecodePacketErr(data []byte) (*Packet, error) {
    p, _, err := readPacket(bytes.NewBuffer(data))
    if err != nil {
        return nil, err
    }
    return p, nil
}

// readPacket reads a single Packet from the reader, returning the number of bytes read.
func readPacket(reader io.Reader) (*Packet, int, error) {
    identifier, length, read, err := readHeader(reader)
    if err != nil {
        return nil, read, err
    }

    p := &Packet{
        Identifier: identifier,
    }

    p.Data = new(bytes.Buffer)
    p.Children = make([]*Packet, 0, 2)
    p.Value = nil

    if p.TagType == TypeConstructed {
        // TODO: if universal, ensure tag type is allowed to be constructed

        // Track how much content we've read
        contentRead := 0
        for {
            if length != LengthIndefinite {
                // End if we've read what we've been told to
                if contentRead == length {
                    break
                }
                // Detect if a packet boundary didn't fall on the expected length
                if contentRead > length {
                    return nil, read, fmt.Errorf("expected to read %d bytes, read %d", length, contentRead)
                }
            }

            // Read the next packet
            child, r, err := readPacket(reader)
            if err != nil {
                return nil, read, unexpectedEOF(err)
            }
            contentRead += r
            read += r

            // Test is this is the EOC marker for our packet
            if isEOCPacket(child) {
                if length == LengthIndefinite {
                    break
                }
                return nil, read, errors.New("eoc child not allowed with definite length")
            }

            // Append and continue
            p.AppendChild(child)
        }
        return p, read, nil
    }

    if length == LengthIndefinite {
        return nil, read, errors.New("indefinite length used with primitive type")
    }

    // Read definite-length content
    if MaxPacketLengthBytes > 0 && int64(length) > MaxPacketLengthBytes {
        return nil, read, fmt.Errorf("length %d greater than maximum %d", length, MaxPacketLengthBytes)
    }
    content := make([]byte, length)
    if length > 0 {
        _, err := io.ReadFull(reader, content)
        if err != nil {
            return nil, read, unexpectedEOF(err)
        }
        read += length
    }

    if p.ClassType == ClassUniversal {
        p.Data.Write(content)
        p.ByteValue = content

        switch p.Tag {
        case TagEOC:
        case TagBoolean:
            val, _ := ParseInt64(content)

            p.Value = val != 0
        case TagInteger:
            p.Value, _ = ParseInt64(content)
        case TagBitString:
        case TagOctetString:
            // the actual string encoding is not known here
            // (e.g. for LDAP content is already an UTF8-encoded
            // string). Return the data without further processing
            p.Value = DecodeString(content)
        case TagNULL:
        case TagObjectIdentifier:
        case TagObjectDescriptor:
        case TagExternal:
        case TagRealFloat:
            p.Value, err = ParseReal(content)
        case TagEnumerated:
            p.Value, _ = ParseInt64(content)
        case TagEmbeddedPDV:
        case TagUTF8String:
            val := DecodeString(content)
            if !utf8.Valid([]byte(val)) {
                err = errors.New("invalid UTF-8 string")
            } else {
                p.Value = val
            }
        case TagRelativeOID:
        case TagSequence:
        case TagSet:
        case TagNumericString:
        case TagPrintableString:
            val := DecodeString(content)
            if err = isPrintableString(val); err == nil {
                p.Value = val
            }
        case TagT61String:
        case TagVideotexString:
        case TagIA5String:
            val := DecodeString(content)
            for i, c := range val {
                if c >= 0x7F {
                    err = fmt.Errorf("invalid character for IA5String at pos %d: %c", i, c)
                    break
                }
            }
            if err == nil {
                p.Value = val
            }
        case TagUTCTime:
        case TagGeneralizedTime:
            p.Value, err = ParseGeneralizedTime(content)
        case TagGraphicString:
        case TagVisibleString:
        case TagGeneralString:
        case TagUniversalString:
        case TagCharacterString:
        case TagBMPString:
        }
    } else {
        p.Data.Write(content)
    }

    return p, read, err
}

func isPrintableString(val string) error {
    for i, c := range val {
        switch {
        case c >= 'a' && c <= 'z':
        case c >= 'A' && c <= 'Z':
        case c >= '0' && c <= '9':
        default:
            switch c {
            case '\'', '(', ')', '+', ',', '-', '.', '=', '/', ':', '?', ' ':
            default:
                return fmt.Errorf("invalid character in position %d", i)
            }
        }
    }
    return nil
}

func (p *Packet) Bytes() []byte {
    var out bytes.Buffer

    out.Write(encodeIdentifier(p.Identifier))
    out.Write(encodeLength(p.Data.Len()))
    out.Write(p.Data.Bytes())

    return out.Bytes()
}

func (p *Packet) AppendChild(child *Packet) {
    p.Data.Write(child.Bytes())
    p.Children = append(p.Children, child)
}

func Encode(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet {
    p := new(Packet)

    p.ClassType = classType
    p.TagType = tagType
    p.Tag = tag
    p.Data = new(bytes.Buffer)

    p.Children = make([]*Packet, 0, 2)

    p.Value = value
    p.Description = description

    if value != nil {
        v := reflect.ValueOf(value)

        if classType == ClassUniversal {
            switch tag {
            case TagOctetString:
                sv, ok := v.Interface().(string)

                if ok {
                    p.Data.Write([]byte(sv))
                }
            case TagEnumerated:
                bv, ok := v.Interface().([]byte)
                if ok {
                    p.Data.Write(bv)
                }
            case TagEmbeddedPDV:
                bv, ok := v.Interface().([]byte)
                if ok {
                    p.Data.Write(bv)
                }
            }
        } else if classType == ClassContext {
            switch tag {
            case TagEnumerated:
                bv, ok := v.Interface().([]byte)
                if ok {
                    p.Data.Write(bv)
                }
            case TagEmbeddedPDV:
                bv, ok := v.Interface().([]byte)
                if ok {
                    p.Data.Write(bv)
                }
            }
        }
    }
    return p
}

func NewSequence(description string) *Packet {
    return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, description)
}

func NewBoolean(classType Class, tagType Type, tag Tag, value bool, description string) *Packet {
    intValue := int64(0)

    if value {
        intValue = 1
    }

    p := Encode(classType, tagType, tag, nil, description)

    p.Value = value
    p.Data.Write(encodeInteger(intValue))

    return p
}

// NewLDAPBoolean returns a RFC 4511-compliant Boolean packet.
func NewLDAPBoolean(classType Class, tagType Type, tag Tag, value bool, description string) *Packet {
    intValue := int64(0)

    if value {
        intValue = 255
    }

    p := Encode(classType, tagType, tag, nil, description)

    p.Value = value
    p.Data.Write(encodeInteger(intValue))

    return p
}

func NewInteger(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet {
    p := Encode(classType, tagType, tag, nil, description)

    p.Value = value
    switch v := value.(type) {
    case int:
        p.Data.Write(encodeInteger(int64(v)))
    case uint:
        p.Data.Write(encodeInteger(int64(v)))
    case int64:
        p.Data.Write(encodeInteger(v))
    case uint64:
        // TODO : check range or add encodeUInt...
        p.Data.Write(encodeInteger(int64(v)))
    case int32:
        p.Data.Write(encodeInteger(int64(v)))
    case uint32:
        p.Data.Write(encodeInteger(int64(v)))
    case int16:
        p.Data.Write(encodeInteger(int64(v)))
    case uint16:
        p.Data.Write(encodeInteger(int64(v)))
    case int8:
        p.Data.Write(encodeInteger(int64(v)))
    case uint8:
        p.Data.Write(encodeInteger(int64(v)))
    default:
        // TODO : add support for big.Int ?
        panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v))
    }

    return p
}

func NewString(classType Class, tagType Type, tag Tag, value, description string) *Packet {
    p := Encode(classType, tagType, tag, nil, description)

    p.Value = value
    p.Data.Write([]byte(value))

    return p
}

func NewGeneralizedTime(classType Class, tagType Type, tag Tag, value time.Time, description string) *Packet {
    p := Encode(classType, tagType, tag, nil, description)
    var s string
    if value.Nanosecond() != 0 {
        s = value.Format(`20060102150405.000000000Z`)
    } else {
        s = value.Format(`20060102150405Z`)
    }
    p.Value = s
    p.Data.Write([]byte(s))
    return p
}

func NewReal(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet {
    p := Encode(classType, tagType, tag, nil, description)

    switch v := value.(type) {
    case float64:
        p.Data.Write(encodeFloat(v))
    case float32:
        p.Data.Write(encodeFloat(float64(v)))
    default:
        panic(fmt.Sprintf("Invalid type %T, expected float{64|32}", v))
    }
    return p
}
vendor/github.com/go-asn1-ber/asn1-ber/content_int.go | 25 (generated, vendored, Normal file)
@@ -0,0 +1,25 @@
package ber

func encodeUnsignedInteger(i uint64) []byte {
    n := uint64Length(i)
    out := make([]byte, n)

    var j int
    for ; n > 0; n-- {
        out[j] = byte(i >> uint((n-1)*8))
        j++
    }

    return out
}

func uint64Length(i uint64) (numBytes int) {
    numBytes = 1

    for i > 255 {
        numBytes++
        i >>= 8
    }

    return
}
105
vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go
generated
vendored
Normal file
105
vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go
generated
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrInvalidTimeFormat is returned when the generalizedTime string was not correct.
|
||||
var ErrInvalidTimeFormat = errors.New("invalid time format")
|
||||
|
||||
var zeroTime = time.Time{}
|
||||
|
||||
// ParseGeneralizedTime parses a string value and if it conforms to
|
||||
// GeneralizedTime[^0] format, will return a time.Time for that value.
|
||||
//
|
||||
// [^0]: https://www.itu.int/rec/T-REC-X.690-201508-I/en Section 11.7
|
||||
func ParseGeneralizedTime(v []byte) (time.Time, error) {
|
||||
var format string
|
||||
var fract time.Duration
|
||||
|
||||
str := []byte(DecodeString(v))
|
||||
tzIndex := bytes.IndexAny(str, "Z+-")
|
||||
if tzIndex < 0 {
|
||||
return zeroTime, ErrInvalidTimeFormat
|
||||
}
|
||||
|
||||
dot := bytes.IndexAny(str, ".,")
|
||||
switch dot {
|
||||
case -1:
|
||||
switch tzIndex {
|
||||
case 10:
|
||||
format = `2006010215Z`
|
||||
case 12:
|
||||
format = `200601021504Z`
|
||||
case 14:
|
||||
format = `20060102150405Z`
|
||||
default:
|
||||
return zeroTime, ErrInvalidTimeFormat
|
||||
}
|
||||
|
||||
case 10, 12:
|
||||
if tzIndex < dot {
|
||||
return zeroTime, ErrInvalidTimeFormat
|
||||
}
|
||||
// a "," is also allowed, but would not be parsed by time.Parse():
|
||||
str[dot] = '.'
|
||||
|
||||
// If <minute> is omitted, then <fraction> represents a fraction of an
|
||||
// hour; otherwise, if <second> and <leap-second> are omitted, then
|
||||
// <fraction> represents a fraction of a minute; otherwise, <fraction>
|
||||
// represents a fraction of a second.
|
||||
|
||||
// parse as float from dot to timezone
|
||||
f, err := strconv.ParseFloat(string(str[dot:tzIndex]), 64)
|
||||
if err != nil {
|
||||
return zeroTime, fmt.Errorf("failed to parse float: %s", err)
|
||||
}
|
||||
// ...and strip that part
|
||||
str = append(str[:dot], str[tzIndex:]...)
|
||||
tzIndex = dot
|
||||
|
||||
if dot == 10 {
|
||||
fract = time.Duration(int64(f * float64(time.Hour)))
|
||||
format = `2006010215Z`
|
||||
} else {
|
||||
fract = time.Duration(int64(f * float64(time.Minute)))
|
||||
format = `200601021504Z`
|
||||
}
|
||||
|
||||
case 14:
|
||||
if tzIndex < dot {
|
||||
return zeroTime, ErrInvalidTimeFormat
|
||||
}
|
||||
str[dot] = '.'
|
||||
// no need for fractional seconds, time.Parse() handles that
|
||||
format = `20060102150405Z`
|
||||
|
||||
default:
|
||||
return zeroTime, ErrInvalidTimeFormat
|
||||
}
|
||||
|
||||
l := len(str)
|
||||
switch l - tzIndex {
|
||||
case 1:
|
||||
if str[l-1] != 'Z' {
|
||||
return zeroTime, ErrInvalidTimeFormat
|
||||
}
|
||||
case 3:
|
||||
format += `0700`
|
||||
str = append(str, []byte("00")...)
|
||||
case 5:
|
||||
format += `0700`
|
||||
default:
|
||||
return zeroTime, ErrInvalidTimeFormat
|
||||
}
|
||||
|
||||
t, err := time.Parse(format, string(str))
|
||||
if err != nil {
|
||||
return zeroTime, fmt.Errorf("%s: %s", ErrInvalidTimeFormat, err)
|
||||
}
|
||||
return t.Add(fract), nil
|
||||
}
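A short usage sketch of ParseGeneralizedTime on two of the accepted shapes, full seconds with the Z designator and fractional minutes with a numeric offset:
```
package main

import (
	"fmt"

	ber "github.com/go-asn1-ber/asn1-ber"
)

func main() {
	// Seconds precision, UTC designator.
	t, err := ber.ParseGeneralizedTime([]byte("20230102150405Z"))
	fmt.Println(t, err) // 2023-01-02 15:04:05 +0000 UTC <nil>

	// Fractional minutes (.5 = 30s) with an explicit offset.
	t, err = ber.ParseGeneralizedTime([]byte("202301021504.5+0200"))
	fmt.Println(t, err) // 2023-01-02 15:04:30 +0200 <nil>
}
```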
|
||||
38
vendor/github.com/go-asn1-ber/asn1-ber/header.go
generated
vendored
Normal file
38
vendor/github.com/go-asn1-ber/asn1-ber/header.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) {
|
||||
var (
|
||||
c, l int
|
||||
i Identifier
|
||||
)
|
||||
|
||||
if i, c, err = readIdentifier(reader); err != nil {
|
||||
return Identifier{}, 0, read, err
|
||||
}
|
||||
identifier = i
|
||||
read += c
|
||||
|
||||
if l, c, err = readLength(reader); err != nil {
|
||||
return Identifier{}, 0, read, err
|
||||
}
|
||||
length = l
|
||||
read += c
|
||||
|
||||
// Validate length type with identifier (x.690, 8.1.3.2.a)
|
||||
if length == LengthIndefinite && identifier.TagType == TypePrimitive {
|
||||
return Identifier{}, 0, read, errors.New("indefinite length used with primitive type")
|
||||
}
|
||||
|
||||
if length < LengthIndefinite {
|
||||
err = fmt.Errorf("length cannot be less than %d", LengthIndefinite)
|
||||
return
|
||||
}
|
||||
|
||||
return identifier, length, read, nil
|
||||
}
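readHeader splits a packet into the identifier and length octets handled by readIdentifier and readLength (both unexported, vendored below). The toy sketch that follows re-derives the same split by hand for definite lengths, purely to make the octet layout visible; it is not the package API:
```
package main

import "fmt"

// decodeHeader is a toy version of the identifier/length split performed by
// readHeader, handling short- and long-form definite lengths only.
func decodeHeader(b []byte) (class, form, tag byte, length int, rest []byte) {
	id := b[0]
	class = id & 0xC0 // top two bits: class
	form = id & 0x20  // bit 6: primitive (0) vs constructed (1)
	tag = id & 0x1F   // low five bits: tag number (short form only)
	b = b[1:]

	if b[0]&0x80 == 0 { // short definite form: length in the low 7 bits
		return class, form, tag, int(b[0]), b[1:]
	}
	n := int(b[0] & 0x7F) // long definite form: count of length octets
	b = b[1:]
	for i := 0; i < n; i++ {
		length = length<<8 | int(b[i])
	}
	return class, form, tag, length, b[n:]
}

func main() {
	// 0x30 = universal constructed SEQUENCE, 0x0A = length 10.
	fmt.Println(decodeHeader([]byte{0x30, 0x0A}))
	// 0x04 = universal primitive OCTET STRING, 0x82 0x01 0x00 = long form, length 256.
	fmt.Println(decodeHeader([]byte{0x04, 0x82, 0x01, 0x00}))
}
```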
|
||||
112
vendor/github.com/go-asn1-ber/asn1-ber/identifier.go
generated
vendored
Normal file
112
vendor/github.com/go-asn1-ber/asn1-ber/identifier.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func readIdentifier(reader io.Reader) (Identifier, int, error) {
|
||||
identifier := Identifier{}
|
||||
read := 0
|
||||
|
||||
// identifier byte
|
||||
b, err := readByte(reader)
|
||||
if err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("error reading identifier byte: %v\n", err)
|
||||
}
|
||||
return Identifier{}, read, err
|
||||
}
|
||||
read++
|
||||
|
||||
identifier.ClassType = Class(b) & ClassBitmask
|
||||
identifier.TagType = Type(b) & TypeBitmask
|
||||
|
||||
if tag := Tag(b) & TagBitmask; tag != HighTag {
|
||||
// short-form tag
|
||||
identifier.Tag = tag
|
||||
return identifier, read, nil
|
||||
}
|
||||
|
||||
// high-tag-number tag
|
||||
tagBytes := 0
|
||||
for {
|
||||
b, err := readByte(reader)
|
||||
if err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err)
|
||||
}
|
||||
return Identifier{}, read, unexpectedEOF(err)
|
||||
}
|
||||
tagBytes++
|
||||
read++
|
||||
|
||||
// Lowest 7 bits get appended to the tag value (x.690, 8.1.2.4.2.b)
|
||||
identifier.Tag <<= 7
|
||||
identifier.Tag |= Tag(b) & HighTagValueBitmask
|
||||
|
||||
// First byte may not be all zeros (x.690, 8.1.2.4.2.c)
|
||||
if tagBytes == 1 && identifier.Tag == 0 {
|
||||
return Identifier{}, read, errors.New("invalid first high-tag-number tag byte")
|
||||
}
|
||||
// Overflow of int64
|
||||
// TODO: support big int tags?
|
||||
if tagBytes > 9 {
|
||||
return Identifier{}, read, errors.New("high-tag-number tag overflow")
|
||||
}
|
||||
|
||||
// Top bit of 0 means this is the last byte in the high-tag-number tag (x.690, 8.1.2.4.2.a)
|
||||
if Tag(b)&HighTagContinueBitmask == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return identifier, read, nil
|
||||
}
|
||||
|
||||
func encodeIdentifier(identifier Identifier) []byte {
|
||||
b := []byte{0x0}
|
||||
b[0] |= byte(identifier.ClassType)
|
||||
b[0] |= byte(identifier.TagType)
|
||||
|
||||
if identifier.Tag < HighTag {
|
||||
// Short-form
|
||||
b[0] |= byte(identifier.Tag)
|
||||
} else {
|
||||
// high-tag-number
|
||||
b[0] |= byte(HighTag)
|
||||
|
||||
tag := identifier.Tag
|
||||
|
||||
b = append(b, encodeHighTag(tag)...)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func encodeHighTag(tag Tag) []byte {
|
||||
// set cap=4 to hopefully avoid additional allocations
|
||||
b := make([]byte, 0, 4)
|
||||
for tag != 0 {
|
||||
// t := last 7 bits of tag (HighTagValueBitmask = 0x7F)
|
||||
t := tag & HighTagValueBitmask
|
||||
|
||||
// right shift tag 7 to remove what was just pulled off
|
||||
tag >>= 7
|
||||
|
||||
// if b already has entries this entry needs a continuation bit (0x80)
|
||||
if len(b) != 0 {
|
||||
t |= HighTagContinueBitmask
|
||||
}
|
||||
|
||||
b = append(b, byte(t))
|
||||
}
|
||||
// reverse
|
||||
// since bits were pulled off 'tag' from low to high, the byte slice is in reverse order.
|
||||
// example: tag = 0xFF results in {0x7F, 0x01 + 0x80 (continuation bit)}
|
||||
// this needs to be reversed into 0x81 0x7F
|
||||
for i, j := 0, len(b)-1; i < len(b)/2; i++ {
|
||||
b[i], b[j-i] = b[j-i], b[i]
|
||||
}
|
||||
return b
|
||||
}
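To make the reversal comment above concrete, here is a standalone sketch of the same base-128 high-tag encoding (illustration only, not the package API):
```
package main

import "fmt"

// encodeHighTagSketch mirrors encodeHighTag above: 7 value bits per byte, most
// significant group first, continuation bit (0x80) on every byte but the last.
func encodeHighTagSketch(tag uint64) []byte {
	var b []byte
	for tag != 0 {
		t := byte(tag & 0x7F)
		tag >>= 7
		if len(b) != 0 {
			t |= 0x80
		}
		b = append(b, t)
	}
	// groups were collected least-significant first, so reverse them
	for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
		b[i], b[j] = b[j], b[i]
	}
	return b
}

func main() {
	fmt.Printf("% X\n", encodeHighTagSketch(0xFF)) // 81 7F, as in the comment above
}
```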
|
||||
81
vendor/github.com/go-asn1-ber/asn1-ber/length.go
generated
vendored
Normal file
81
vendor/github.com/go-asn1-ber/asn1-ber/length.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func readLength(reader io.Reader) (length int, read int, err error) {
|
||||
// length byte
|
||||
b, err := readByte(reader)
|
||||
if err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("error reading length byte: %v\n", err)
|
||||
}
|
||||
return 0, 0, unexpectedEOF(err)
|
||||
}
|
||||
read++
|
||||
|
||||
switch {
|
||||
case b == 0xFF:
|
||||
// Invalid 0xFF (x.690, 8.1.3.5.c)
|
||||
return 0, read, errors.New("invalid length byte 0xff")
|
||||
|
||||
case b == LengthLongFormBitmask:
|
||||
// Indefinite form, we have to decode packets until we encounter an EOC packet (x.690, 8.1.3.6)
|
||||
length = LengthIndefinite
|
||||
|
||||
case b&LengthLongFormBitmask == 0:
|
||||
// Short definite form, extract the length from the bottom 7 bits (x.690, 8.1.3.4)
|
||||
length = int(b) & LengthValueBitmask
|
||||
|
||||
case b&LengthLongFormBitmask != 0:
|
||||
// Long definite form, extract the number of length bytes to follow from the bottom 7 bits (x.690, 8.1.3.5.b)
|
||||
lengthBytes := int(b) & LengthValueBitmask
|
||||
// Protect against overflow
|
||||
// TODO: support big int length?
|
||||
if lengthBytes > 8 {
|
||||
return 0, read, errors.New("long-form length overflow")
|
||||
}
|
||||
|
||||
// Accumulate into a 64-bit variable
|
||||
var length64 int64
|
||||
for i := 0; i < lengthBytes; i++ {
|
||||
b, err = readByte(reader)
|
||||
if err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("error reading long-form length byte %d: %v\n", i, err)
|
||||
}
|
||||
return 0, read, unexpectedEOF(err)
|
||||
}
|
||||
read++
|
||||
|
||||
// x.690, 8.1.3.5
|
||||
length64 <<= 8
|
||||
length64 |= int64(b)
|
||||
}
|
||||
|
||||
// Cast to a platform-specific integer
|
||||
length = int(length64)
|
||||
// Ensure we didn't overflow
|
||||
if int64(length) != length64 {
|
||||
return 0, read, errors.New("long-form length overflow")
|
||||
}
|
||||
|
||||
default:
|
||||
return 0, read, errors.New("invalid length byte")
|
||||
}
|
||||
|
||||
return length, read, nil
|
||||
}
|
||||
|
||||
func encodeLength(length int) []byte {
|
||||
lengthBytes := encodeUnsignedInteger(uint64(length))
|
||||
if length > 127 || len(lengthBytes) > 1 {
|
||||
longFormBytes := []byte{LengthLongFormBitmask | byte(len(lengthBytes))}
|
||||
longFormBytes = append(longFormBytes, lengthBytes...)
|
||||
lengthBytes = longFormBytes
|
||||
}
|
||||
return lengthBytes
|
||||
}
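The two definite length forms are easy to observe from outside the package by inspecting the serialized bytes of a small and a large packet built with the exported constructors; a minimal sketch:
```
package main

import (
	"fmt"
	"strings"

	ber "github.com/go-asn1-ber/asn1-ber"
)

func main() {
	short := ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "hi", "short")
	long := ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, strings.Repeat("x", 200), "long")

	// 2-byte value: 04 02 (short definite form, length in one byte).
	fmt.Printf("% X\n", short.Bytes()[:2])
	// 200-byte value: 04 81 C8 (long form, one length octet follows).
	fmt.Printf("% X\n", long.Bytes()[:3])
}
```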
|
||||
163
vendor/github.com/go-asn1-ber/asn1-ber/real.go
generated
vendored
Normal file
163
vendor/github.com/go-asn1-ber/asn1-ber/real.go
generated
vendored
Normal file
@@ -0,0 +1,163 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func encodeFloat(v float64) []byte {
|
||||
switch {
|
||||
case math.IsInf(v, 1):
|
||||
return []byte{0x40}
|
||||
case math.IsInf(v, -1):
|
||||
return []byte{0x41}
|
||||
case math.IsNaN(v):
|
||||
return []byte{0x42}
|
||||
case v == 0.0:
|
||||
if math.Signbit(v) {
|
||||
return []byte{0x43}
|
||||
}
|
||||
return []byte{}
|
||||
default:
|
||||
// we take the easy part ;-)
|
||||
value := []byte(strconv.FormatFloat(v, 'G', -1, 64))
|
||||
var ret []byte
|
||||
if bytes.Contains(value, []byte{'E'}) {
|
||||
ret = []byte{0x03}
|
||||
} else {
|
||||
ret = []byte{0x02}
|
||||
}
|
||||
ret = append(ret, value...)
|
||||
return ret
|
||||
}
|
||||
}
|
||||
|
||||
func ParseReal(v []byte) (val float64, err error) {
|
||||
if len(v) == 0 {
|
||||
return 0.0, nil
|
||||
}
|
||||
switch {
|
||||
case v[0]&0x80 == 0x80:
|
||||
val, err = parseBinaryFloat(v)
|
||||
case v[0]&0xC0 == 0x40:
|
||||
val, err = parseSpecialFloat(v)
|
||||
case v[0]&0xC0 == 0x0:
|
||||
val, err = parseDecimalFloat(v)
|
||||
default:
|
||||
return 0.0, fmt.Errorf("invalid info block")
|
||||
}
|
||||
if err != nil {
|
||||
return 0.0, err
|
||||
}
|
||||
|
||||
if val == 0.0 && !math.Signbit(val) {
|
||||
return 0.0, errors.New("REAL value +0 must be encoded with zero-length value block")
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func parseBinaryFloat(v []byte) (float64, error) {
|
||||
var info byte
|
||||
var buf []byte
|
||||
|
||||
info, v = v[0], v[1:]
|
||||
|
||||
var base int
|
||||
switch info & 0x30 {
|
||||
case 0x00:
|
||||
base = 2
|
||||
case 0x10:
|
||||
base = 8
|
||||
case 0x20:
|
||||
base = 16
|
||||
case 0x30:
|
||||
return 0.0, errors.New("bits 6 and 5 of information octet for REAL are equal to 11")
|
||||
}
|
||||
|
||||
scale := uint((info & 0x0c) >> 2)
|
||||
|
||||
var expLen int
|
||||
switch info & 0x03 {
|
||||
case 0x00:
|
||||
expLen = 1
|
||||
case 0x01:
|
||||
expLen = 2
|
||||
case 0x02:
|
||||
expLen = 3
|
||||
case 0x03:
|
||||
if len(v) < 2 {
|
||||
return 0.0, errors.New("invalid data")
|
||||
}
|
||||
expLen = int(v[0])
|
||||
if expLen > 8 {
|
||||
return 0.0, errors.New("too big value of exponent")
|
||||
}
|
||||
v = v[1:]
|
||||
}
|
||||
if expLen > len(v) {
|
||||
return 0.0, errors.New("too big value of exponent")
|
||||
}
|
||||
buf, v = v[:expLen], v[expLen:]
|
||||
exponent, err := ParseInt64(buf)
|
||||
if err != nil {
|
||||
return 0.0, err
|
||||
}
|
||||
|
||||
if len(v) > 8 {
|
||||
return 0.0, errors.New("too big value of mantissa")
|
||||
}
|
||||
|
||||
mant, err := ParseInt64(v)
|
||||
if err != nil {
|
||||
return 0.0, err
|
||||
}
|
||||
mantissa := mant << scale
|
||||
|
||||
if info&0x40 == 0x40 {
|
||||
mantissa = -mantissa
|
||||
}
|
||||
|
||||
return float64(mantissa) * math.Pow(float64(base), float64(exponent)), nil
|
||||
}
|
||||
|
||||
func parseDecimalFloat(v []byte) (val float64, err error) {
|
||||
switch v[0] & 0x3F {
|
||||
case 0x01: // NR form 1
|
||||
var iVal int64
|
||||
iVal, err = strconv.ParseInt(strings.TrimLeft(string(v[1:]), " "), 10, 64)
|
||||
val = float64(iVal)
|
||||
case 0x02, 0x03: // NR form 2, 3
|
||||
val, err = strconv.ParseFloat(strings.Replace(strings.TrimLeft(string(v[1:]), " "), ",", ".", -1), 64)
|
||||
default:
|
||||
err = errors.New("incorrect NR form")
|
||||
}
|
||||
if err != nil {
|
||||
return 0.0, err
|
||||
}
|
||||
|
||||
if val == 0.0 && math.Signbit(val) {
|
||||
return 0.0, errors.New("REAL value -0 must be encoded as a special value")
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func parseSpecialFloat(v []byte) (float64, error) {
|
||||
if len(v) != 1 {
|
||||
return 0.0, errors.New(`encoding of "special value" must not contain exponent and mantissa`)
|
||||
}
|
||||
switch v[0] {
|
||||
case 0x40:
|
||||
return math.Inf(1), nil
|
||||
case 0x41:
|
||||
return math.Inf(-1), nil
|
||||
case 0x42:
|
||||
return math.NaN(), nil
|
||||
case 0x43:
|
||||
return math.Copysign(0, -1), nil
|
||||
}
|
||||
return 0.0, errors.New(`encoding of "special value" not from ASN.1 standard`)
|
||||
}
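The four special-value octets handled above can be fed straight into the exported ParseReal; a minimal sketch:
```
package main

import (
	"fmt"

	ber "github.com/go-asn1-ber/asn1-ber"
)

func main() {
	// 0x40, 0x41, 0x42, 0x43 are the REAL "special value" content octets.
	for _, b := range [][]byte{{0x40}, {0x41}, {0x42}, {0x43}} {
		v, err := ber.ParseReal(b)
		fmt.Println(v, err) // +Inf, -Inf, NaN, -0 in turn
	}
}
```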
|
||||
28
vendor/github.com/go-asn1-ber/asn1-ber/util.go
generated
vendored
Normal file
28
vendor/github.com/go-asn1-ber/asn1-ber/util.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
package ber
|
||||
|
||||
import "io"
|
||||
|
||||
func readByte(reader io.Reader) (byte, error) {
|
||||
bytes := make([]byte, 1)
|
||||
_, err := io.ReadFull(reader, bytes)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return bytes[0], nil
|
||||
}
|
||||
|
||||
func unexpectedEOF(err error) error {
|
||||
if err == io.EOF {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func isEOCPacket(p *Packet) bool {
|
||||
return p != nil &&
|
||||
p.Tag == TagEOC &&
|
||||
p.ClassType == ClassUniversal &&
|
||||
p.TagType == TypePrimitive &&
|
||||
len(p.ByteValue) == 0 &&
|
||||
len(p.Children) == 0
|
||||
}
|
||||
22
vendor/github.com/go-ldap/ldap/v3/LICENSE
generated
vendored
Normal file
22
vendor/github.com/go-ldap/ldap/v3/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
|
||||
Portions copyright (c) 2015-2016 go-ldap Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
89
vendor/github.com/go-ldap/ldap/v3/add.go
generated
vendored
Normal file
89
vendor/github.com/go-ldap/ldap/v3/add.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// Attribute represents an LDAP attribute
|
||||
type Attribute struct {
|
||||
// Type is the name of the LDAP attribute
|
||||
Type string
|
||||
// Vals are the LDAP attribute values
|
||||
Vals []string
|
||||
}
|
||||
|
||||
func (a *Attribute) encode() *ber.Packet {
|
||||
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute")
|
||||
seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type"))
|
||||
set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
|
||||
for _, value := range a.Vals {
|
||||
set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
|
||||
}
|
||||
seq.AppendChild(set)
|
||||
return seq
|
||||
}
|
||||
|
||||
// AddRequest represents an LDAP AddRequest operation
|
||||
type AddRequest struct {
|
||||
// DN identifies the entry being added
|
||||
DN string
|
||||
// Attributes list the attributes of the new entry
|
||||
Attributes []Attribute
|
||||
// Controls hold optional controls to send with the request
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
func (req *AddRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request")
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
|
||||
attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
|
||||
for _, attribute := range req.Attributes {
|
||||
attributes.AppendChild(attribute.encode())
|
||||
}
|
||||
pkt.AppendChild(attributes)
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
if len(req.Controls) > 0 {
|
||||
envelope.AppendChild(encodeControls(req.Controls))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Attribute adds an attribute with the given type and values
|
||||
func (req *AddRequest) Attribute(attrType string, attrVals []string) {
|
||||
req.Attributes = append(req.Attributes, Attribute{Type: attrType, Vals: attrVals})
|
||||
}
|
||||
|
||||
// NewAddRequest returns an AddRequest for the given DN, with no attributes
|
||||
func NewAddRequest(dn string, controls []Control) *AddRequest {
|
||||
return &AddRequest{
|
||||
DN: dn,
|
||||
Controls: controls,
|
||||
}
|
||||
}
|
||||
|
||||
// Add performs the given AddRequest
|
||||
func (l *Conn) Add(addRequest *AddRequest) error {
|
||||
msgCtx, err := l.doRequest(addRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationAddResponse {
|
||||
err := GetLDAPError(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("ldap: unexpected response: %d", packet.Children[1].Tag)
|
||||
}
|
||||
return nil
|
||||
}
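A minimal sketch of driving an AddRequest end to end; the server address, bind DN, and entry attributes below are placeholders:
```
package main

import (
	"log"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.com:389") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Bind("cn=admin,dc=example,dc=com", "password"); err != nil { // placeholder credentials
		log.Fatal(err)
	}

	req := ldap.NewAddRequest("cn=jdoe,ou=people,dc=example,dc=com", nil)
	req.Attribute("objectClass", []string{"inetOrgPerson"})
	req.Attribute("cn", []string{"jdoe"})
	req.Attribute("sn", []string{"Doe"})

	if err := conn.Add(req); err != nil {
		log.Fatal(err)
	}
}
```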
|
||||
735
vendor/github.com/go-ldap/ldap/v3/bind.go
generated
vendored
Normal file
735
vendor/github.com/go-ldap/ldap/v3/bind.go
generated
vendored
Normal file
@@ -0,0 +1,735 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
enchex "encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/go-ntlmssp"
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// SimpleBindRequest represents a username/password bind operation
|
||||
type SimpleBindRequest struct {
|
||||
// Username is the name of the Directory object that the client wishes to bind as
|
||||
Username string
|
||||
// Password is the credentials to bind with
|
||||
Password string
|
||||
// Controls are optional controls to send with the bind request
|
||||
Controls []Control
|
||||
// AllowEmptyPassword sets whether the client allows binding with an empty password
|
||||
// (normally used for unauthenticated bind).
|
||||
AllowEmptyPassword bool
|
||||
}
|
||||
|
||||
// SimpleBindResult contains the response from the server
|
||||
type SimpleBindResult struct {
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
// NewSimpleBindRequest returns a bind request
|
||||
func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest {
|
||||
return &SimpleBindRequest{
|
||||
Username: username,
|
||||
Password: password,
|
||||
Controls: controls,
|
||||
AllowEmptyPassword: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (req *SimpleBindRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
|
||||
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Username, "User Name"))
|
||||
pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.Password, "Password"))
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
if len(req.Controls) > 0 {
|
||||
envelope.AppendChild(encodeControls(req.Controls))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SimpleBind performs the simple bind operation defined in the given request
|
||||
func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) {
|
||||
if simpleBindRequest.Password == "" && !simpleBindRequest.AllowEmptyPassword {
|
||||
return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client"))
|
||||
}
|
||||
|
||||
msgCtx, err := l.doRequest(simpleBindRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &SimpleBindResult{
|
||||
Controls: make([]Control, 0),
|
||||
}
|
||||
|
||||
if len(packet.Children) == 3 {
|
||||
for _, child := range packet.Children[2].Children {
|
||||
decodedChild, decodeErr := DecodeControl(child)
|
||||
if decodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to decode child control: %s", decodeErr)
|
||||
}
|
||||
result.Controls = append(result.Controls, decodedChild)
|
||||
}
|
||||
}
|
||||
|
||||
err = GetLDAPError(packet)
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Bind performs a bind with the given username and password.
|
||||
//
|
||||
// It does not allow unauthenticated bind (i.e. empty password). Use the UnauthenticatedBind method
|
||||
// for that.
|
||||
func (l *Conn) Bind(username, password string) error {
|
||||
req := &SimpleBindRequest{
|
||||
Username: username,
|
||||
Password: password,
|
||||
AllowEmptyPassword: false,
|
||||
}
|
||||
_, err := l.SimpleBind(req)
|
||||
return err
|
||||
}
|
||||
|
||||
// UnauthenticatedBind performs an unauthenticated bind.
|
||||
//
|
||||
// A username may be provided for trace (e.g. logging) purposes only, but it is normally not
|
||||
// authenticated or otherwise validated by the LDAP server.
|
||||
//
|
||||
// See https://tools.ietf.org/html/rfc4513#section-5.1.2 .
|
||||
// See https://tools.ietf.org/html/rfc4513#section-6.3.1 .
|
||||
func (l *Conn) UnauthenticatedBind(username string) error {
|
||||
req := &SimpleBindRequest{
|
||||
Username: username,
|
||||
Password: "",
|
||||
AllowEmptyPassword: true,
|
||||
}
|
||||
_, err := l.SimpleBind(req)
|
||||
return err
|
||||
}
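A minimal sketch of a simple bind using SimpleBindRequest directly; the host and DN are placeholders:
```
package main

import (
	"log"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.com:389") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	req := &ldap.SimpleBindRequest{
		Username: "cn=reader,dc=example,dc=com", // placeholder DN
		Password: "secret",
	}
	// Leave AllowEmptyPassword false so an empty password is rejected client-side.
	if _, err := conn.SimpleBind(req); err != nil {
		log.Fatal(err)
	}
}
```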
|
||||
|
||||
// DigestMD5BindRequest represents a digest-md5 bind operation
|
||||
type DigestMD5BindRequest struct {
|
||||
Host string
|
||||
// Username is the name of the Directory object that the client wishes to bind as
|
||||
Username string
|
||||
// Password is the credentials to bind with
|
||||
Password string
|
||||
// Controls are optional controls to send with the bind request
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
func (req *DigestMD5BindRequest) appendTo(envelope *ber.Packet) error {
|
||||
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
|
||||
request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
|
||||
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
|
||||
|
||||
auth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication")
|
||||
auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "DIGEST-MD5", "SASL Mech"))
|
||||
request.AppendChild(auth)
|
||||
envelope.AppendChild(request)
|
||||
if len(req.Controls) > 0 {
|
||||
envelope.AppendChild(encodeControls(req.Controls))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DigestMD5BindResult contains the response from the server
|
||||
type DigestMD5BindResult struct {
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
// MD5Bind performs a digest-md5 bind with the given host, username and password.
|
||||
func (l *Conn) MD5Bind(host, username, password string) error {
|
||||
req := &DigestMD5BindRequest{
|
||||
Host: host,
|
||||
Username: username,
|
||||
Password: password,
|
||||
}
|
||||
_, err := l.DigestMD5Bind(req)
|
||||
return err
|
||||
}
|
||||
|
||||
// DigestMD5Bind performs the digest-md5 bind operation defined in the given request
|
||||
func (l *Conn) DigestMD5Bind(digestMD5BindRequest *DigestMD5BindRequest) (*DigestMD5BindResult, error) {
|
||||
if digestMD5BindRequest.Password == "" {
|
||||
return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client"))
|
||||
}
|
||||
|
||||
msgCtx, err := l.doRequest(digestMD5BindRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
|
||||
if l.Debug {
|
||||
if err = addLDAPDescriptions(packet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ber.PrintPacket(packet)
|
||||
}
|
||||
|
||||
result := &DigestMD5BindResult{
|
||||
Controls: make([]Control, 0),
|
||||
}
|
||||
var params map[string]string
|
||||
if len(packet.Children) == 2 {
|
||||
if len(packet.Children[1].Children) == 4 {
|
||||
child := packet.Children[1].Children[0]
|
||||
if child.Tag != ber.TagEnumerated {
|
||||
return result, GetLDAPError(packet)
|
||||
}
|
||||
if child.Value.(int64) != 14 {
|
||||
return result, GetLDAPError(packet)
|
||||
}
|
||||
child = packet.Children[1].Children[3]
|
||||
if child.Tag != ber.TagObjectDescriptor {
|
||||
return result, GetLDAPError(packet)
|
||||
}
|
||||
if child.Data == nil {
|
||||
return result, GetLDAPError(packet)
|
||||
}
|
||||
data, _ := ioutil.ReadAll(child.Data)
|
||||
params, err = parseParams(string(data))
|
||||
if err != nil {
|
||||
return result, fmt.Errorf("parsing digest-challenge: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if params != nil {
|
||||
resp := computeResponse(
|
||||
params,
|
||||
"ldap/"+strings.ToLower(digestMD5BindRequest.Host),
|
||||
digestMD5BindRequest.Username,
|
||||
digestMD5BindRequest.Password,
|
||||
)
|
||||
packet = ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
|
||||
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
|
||||
|
||||
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
|
||||
request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
|
||||
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
|
||||
|
||||
auth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication")
|
||||
auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "DIGEST-MD5", "SASL Mech"))
|
||||
auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, resp, "Credentials"))
|
||||
request.AppendChild(auth)
|
||||
packet.AppendChild(request)
|
||||
msgCtx, err = l.sendMessage(packet)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("send message: %s", err)
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
packetResponse, ok := <-msgCtx.responses
|
||||
if !ok {
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
|
||||
}
|
||||
packet, err = packetResponse.ReadPacket()
|
||||
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read packet: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = GetLDAPError(packet)
|
||||
return result, err
|
||||
}
|
||||
|
||||
func parseParams(str string) (map[string]string, error) {
|
||||
m := make(map[string]string)
|
||||
var key, value string
|
||||
var state int
|
||||
for i := 0; i <= len(str); i++ {
|
||||
switch state {
|
||||
case 0: // reading key
|
||||
if i == len(str) {
|
||||
return nil, fmt.Errorf("syntax error on %d", i)
|
||||
}
|
||||
if str[i] != '=' {
|
||||
key += string(str[i])
|
||||
continue
|
||||
}
|
||||
state = 1
|
||||
case 1: // reading value
|
||||
if i == len(str) {
|
||||
m[key] = value
|
||||
break
|
||||
}
|
||||
switch str[i] {
|
||||
case ',':
|
||||
m[key] = value
|
||||
state = 0
|
||||
key = ""
|
||||
value = ""
|
||||
case '"':
|
||||
if value != "" {
|
||||
return nil, fmt.Errorf("syntax error on %d", i)
|
||||
}
|
||||
state = 2
|
||||
default:
|
||||
value += string(str[i])
|
||||
}
|
||||
case 2: // inside quotes
|
||||
if i == len(str) {
|
||||
return nil, fmt.Errorf("syntax error on %d", i)
|
||||
}
|
||||
if str[i] != '"' {
|
||||
value += string(str[i])
|
||||
} else {
|
||||
state = 1
|
||||
}
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func computeResponse(params map[string]string, uri, username, password string) string {
|
||||
nc := "00000001"
|
||||
qop := "auth"
|
||||
cnonce := enchex.EncodeToString(randomBytes(16))
|
||||
x := username + ":" + params["realm"] + ":" + password
|
||||
y := md5Hash([]byte(x))
|
||||
|
||||
a1 := bytes.NewBuffer(y)
|
||||
a1.WriteString(":" + params["nonce"] + ":" + cnonce)
|
||||
if len(params["authzid"]) > 0 {
|
||||
a1.WriteString(":" + params["authzid"])
|
||||
}
|
||||
a2 := bytes.NewBuffer([]byte("AUTHENTICATE"))
|
||||
a2.WriteString(":" + uri)
|
||||
ha1 := enchex.EncodeToString(md5Hash(a1.Bytes()))
|
||||
ha2 := enchex.EncodeToString(md5Hash(a2.Bytes()))
|
||||
|
||||
kd := ha1
|
||||
kd += ":" + params["nonce"]
|
||||
kd += ":" + nc
|
||||
kd += ":" + cnonce
|
||||
kd += ":" + qop
|
||||
kd += ":" + ha2
|
||||
resp := enchex.EncodeToString(md5Hash([]byte(kd)))
|
||||
return fmt.Sprintf(
|
||||
`username="%s",realm="%s",nonce="%s",cnonce="%s",nc=00000001,qop=%s,digest-uri="%s",response=%s`,
|
||||
username,
|
||||
params["realm"],
|
||||
params["nonce"],
|
||||
cnonce,
|
||||
qop,
|
||||
uri,
|
||||
resp,
|
||||
)
|
||||
}
|
||||
|
||||
func md5Hash(b []byte) []byte {
|
||||
hasher := md5.New()
|
||||
hasher.Write(b)
|
||||
return hasher.Sum(nil)
|
||||
}
|
||||
|
||||
func randomBytes(len int) []byte {
|
||||
b := make([]byte, len)
|
||||
for i := 0; i < len; i++ {
|
||||
b[i] = byte(rand.Intn(256))
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
var externalBindRequest = requestFunc(func(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
|
||||
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
|
||||
|
||||
saslAuth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication")
|
||||
saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "EXTERNAL", "SASL Mech"))
|
||||
saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "SASL Cred"))
|
||||
|
||||
pkt.AppendChild(saslAuth)
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
// ExternalBind performs SASL/EXTERNAL authentication.
|
||||
//
|
||||
// Use ldap.DialURL("ldapi://") to connect to the Unix socket before ExternalBind.
|
||||
//
|
||||
// See https://tools.ietf.org/html/rfc4422#appendix-A
|
||||
func (l *Conn) ExternalBind() error {
|
||||
msgCtx, err := l.doRequest(externalBindRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return GetLDAPError(packet)
|
||||
}
|
||||
|
||||
// NTLMBind performs an NTLMSSP bind leveraging https://github.com/Azure/go-ntlmssp
|
||||
|
||||
// NTLMBindRequest represents an NTLMSSP bind operation
|
||||
type NTLMBindRequest struct {
|
||||
// Domain is the AD Domain to authenticate to. If not specified, it will be grabbed from the NTLMSSP Challenge
|
||||
Domain string
|
||||
// Username is the name of the Directory object that the client wishes to bind as
|
||||
Username string
|
||||
// Password is the credentials to bind with
|
||||
Password string
|
||||
// AllowEmptyPassword sets whether the client allows binding with an empty password
|
||||
// (normally used for unauthenticated bind).
|
||||
AllowEmptyPassword bool
|
||||
// Hash is the hex NTLM hash to bind with. Password or hash must be provided
|
||||
Hash string
|
||||
// Controls are optional controls to send with the bind request
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
func (req *NTLMBindRequest) appendTo(envelope *ber.Packet) error {
|
||||
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
|
||||
request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
|
||||
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
|
||||
|
||||
// generate an NTLMSSP Negotiation message for the specified domain (it can be blank)
|
||||
negMessage, err := ntlmssp.NewNegotiateMessage(req.Domain, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("err creating negmessage: %s", err)
|
||||
}
|
||||
|
||||
// append the generated NTLMSSP message as a TagEnumerated BER value
|
||||
auth := ber.Encode(ber.ClassContext, ber.TypePrimitive, ber.TagEnumerated, negMessage, "authentication")
|
||||
request.AppendChild(auth)
|
||||
envelope.AppendChild(request)
|
||||
if len(req.Controls) > 0 {
|
||||
envelope.AppendChild(encodeControls(req.Controls))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NTLMBindResult contains the response from the server
|
||||
type NTLMBindResult struct {
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
// NTLMBind performs an NTLMSSP Bind with the given domain, username and password
|
||||
func (l *Conn) NTLMBind(domain, username, password string) error {
|
||||
req := &NTLMBindRequest{
|
||||
Domain: domain,
|
||||
Username: username,
|
||||
Password: password,
|
||||
}
|
||||
_, err := l.NTLMChallengeBind(req)
|
||||
return err
|
||||
}
|
||||
|
||||
// NTLMUnauthenticatedBind performs a bind with an empty password.
|
||||
//
|
||||
// A username is required. The anonymous bind is not (yet) supported by the go-ntlmssp library (https://github.com/Azure/go-ntlmssp/blob/819c794454d067543bc61d29f61fef4b3c3df62c/authenticate_message.go#L87)
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-nlmp/b38c36ed-2804-4868-a9ff-8dd3182128e4 part 3.2.5.1.2
|
||||
func (l *Conn) NTLMUnauthenticatedBind(domain, username string) error {
|
||||
req := &NTLMBindRequest{
|
||||
Domain: domain,
|
||||
Username: username,
|
||||
Password: "",
|
||||
AllowEmptyPassword: true,
|
||||
}
|
||||
_, err := l.NTLMChallengeBind(req)
|
||||
return err
|
||||
}
|
||||
|
||||
// NTLMBindWithHash performs an NTLM Bind with an NTLM hash instead of plaintext password (pass-the-hash)
|
||||
func (l *Conn) NTLMBindWithHash(domain, username, hash string) error {
|
||||
req := &NTLMBindRequest{
|
||||
Domain: domain,
|
||||
Username: username,
|
||||
Hash: hash,
|
||||
}
|
||||
_, err := l.NTLMChallengeBind(req)
|
||||
return err
|
||||
}
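A minimal sketch of the password-based NTLM bind; the domain controller address and credentials are placeholders, and NTLMBindWithHash takes a hex NT hash in place of the password:
```
package main

import (
	"log"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	conn, err := ldap.DialURL("ldap://dc01.example.local:389") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Negotiate, receive the NTLMSSP challenge, and answer it in one call.
	if err := conn.NTLMBind("EXAMPLE", "jdoe", "password"); err != nil { // placeholder credentials
		log.Fatal(err)
	}
}
```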
|
||||
|
||||
// NTLMChallengeBind performs the NTLMSSP bind operation defined in the given request
|
||||
func (l *Conn) NTLMChallengeBind(ntlmBindRequest *NTLMBindRequest) (*NTLMBindResult, error) {
|
||||
if !ntlmBindRequest.AllowEmptyPassword && ntlmBindRequest.Password == "" && ntlmBindRequest.Hash == "" {
|
||||
return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client"))
|
||||
}
|
||||
|
||||
msgCtx, err := l.doRequest(ntlmBindRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
|
||||
if l.Debug {
|
||||
if err = addLDAPDescriptions(packet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ber.PrintPacket(packet)
|
||||
}
|
||||
result := &NTLMBindResult{
|
||||
Controls: make([]Control, 0),
|
||||
}
|
||||
var ntlmsspChallenge []byte
|
||||
|
||||
// now find the NTLM Response Message
|
||||
if len(packet.Children) == 2 {
|
||||
if len(packet.Children[1].Children) == 3 {
|
||||
child := packet.Children[1].Children[1]
|
||||
ntlmsspChallenge = child.ByteValue
|
||||
// Check to make sure we got the right message. It will always start with NTLMSSP
|
||||
if len(ntlmsspChallenge) < 7 || !bytes.Equal(ntlmsspChallenge[:7], []byte("NTLMSSP")) {
|
||||
return result, GetLDAPError(packet)
|
||||
}
|
||||
l.Debug.Printf("%d: found ntlmssp challenge", msgCtx.id)
|
||||
}
|
||||
}
|
||||
if ntlmsspChallenge != nil {
|
||||
var err error
|
||||
var responseMessage []byte
|
||||
// generate a response message to the challenge with the given Username/Password if password is provided
|
||||
if ntlmBindRequest.Hash != "" {
|
||||
responseMessage, err = ntlmssp.ProcessChallengeWithHash(ntlmsspChallenge, ntlmBindRequest.Username, ntlmBindRequest.Hash)
|
||||
} else if ntlmBindRequest.Password != "" || ntlmBindRequest.AllowEmptyPassword {
|
||||
_, _, domainNeeded := ntlmssp.GetDomain(ntlmBindRequest.Username)
|
||||
responseMessage, err = ntlmssp.ProcessChallenge(ntlmsspChallenge, ntlmBindRequest.Username, ntlmBindRequest.Password, domainNeeded)
|
||||
} else {
|
||||
err = fmt.Errorf("need a password or hash to generate reply")
|
||||
}
|
||||
if err != nil {
|
||||
return result, fmt.Errorf("parsing ntlm-challenge: %s", err)
|
||||
}
|
||||
packet = ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
|
||||
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
|
||||
|
||||
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
|
||||
request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
|
||||
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
|
||||
|
||||
// append the challenge response message as a TagEmbeddedPDV BER value
|
||||
auth := ber.Encode(ber.ClassContext, ber.TypePrimitive, ber.TagEmbeddedPDV, responseMessage, "authentication")
|
||||
|
||||
request.AppendChild(auth)
|
||||
packet.AppendChild(request)
|
||||
msgCtx, err = l.sendMessage(packet)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("send message: %s", err)
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
packetResponse, ok := <-msgCtx.responses
|
||||
if !ok {
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
|
||||
}
|
||||
packet, err = packetResponse.ReadPacket()
|
||||
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read packet: %s", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
err = GetLDAPError(packet)
|
||||
return result, err
|
||||
}
|
||||
|
||||
// GSSAPIClient interface is used as the client-side implementation for the
|
||||
// GSSAPI SASL mechanism.
|
||||
// Interface inspired by GSSAPIClient from golang.org/x/crypto/ssh
|
||||
type GSSAPIClient interface {
|
||||
// InitSecContext initiates the establishment of a security context for
|
||||
// GSS-API between the client and server.
|
||||
// Initially the token parameter should be specified as nil.
|
||||
// The routine may return an outputToken which should be transferred to
|
||||
// the server, where the server will present it to AcceptSecContext.
|
||||
// If no token need be sent, InitSecContext will indicate this by setting
|
||||
// needContinue to false. To complete the context
|
||||
// establishment, one or more reply tokens may be required from the server;
|
||||
// if so, InitSecContext will return a needContinue which is true.
|
||||
// In this case, InitSecContext should be called again when the
|
||||
// reply token is received from the server, passing the reply token
|
||||
// to InitSecContext via the token parameters.
|
||||
// See RFC 4752 section 3.1.
|
||||
InitSecContext(target string, token []byte) (outputToken []byte, needContinue bool, err error)
|
||||
// NegotiateSaslAuth performs the last step of the SASL handshake.
|
||||
// It takes a token, which, when unwrapped, describes the servers supported
|
||||
// security layers (first octet) and maximum receive buffer (remaining
|
||||
// three octets).
|
||||
// If the received token is unacceptable an error must be returned to abort
|
||||
// the handshake.
|
||||
// Outputs a signed token describing the client's selected security layer
|
||||
// and receive buffer size and optionally an authorization identity.
|
||||
// The returned token will be sent to the server and the handshake considered
|
||||
// completed successfully and the server authenticated.
|
||||
// See RFC 4752 section 3.1.
|
||||
NegotiateSaslAuth(token []byte, authzid string) ([]byte, error)
|
||||
// DeleteSecContext destroys any established secure context.
|
||||
DeleteSecContext() error
|
||||
}
|
||||
|
||||
// GSSAPIBindRequest represents a GSSAPI SASL mechanism bind request.
|
||||
// See rfc4752 and rfc4513 section 5.2.1.2.
|
||||
type GSSAPIBindRequest struct {
|
||||
// Service Principal Name used for the service ticket, e.g. "ldap/<host>"
|
||||
ServicePrincipalName string
|
||||
// (Optional) Authorization entity
|
||||
AuthZID string
|
||||
// (Optional) Controls to send with the bind request
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
// GSSAPIBind performs the GSSAPI SASL bind using the provided GSSAPI client.
|
||||
func (l *Conn) GSSAPIBind(client GSSAPIClient, servicePrincipal, authzid string) error {
|
||||
return l.GSSAPIBindRequest(client, &GSSAPIBindRequest{
|
||||
ServicePrincipalName: servicePrincipal,
|
||||
AuthZID: authzid,
|
||||
})
|
||||
}
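A sketch of how GSSAPIBind is driven. The fakeGSSAPIClient below is only a stand-in for a real Kerberos-backed implementation of GSSAPIClient; it satisfies the interface but performs no real GSS-API exchange, so it would not authenticate against an actual server:
```
package main

import (
	"log"

	"github.com/go-ldap/ldap/v3"
)

// fakeGSSAPIClient is a placeholder implementation of ldap.GSSAPIClient.
type fakeGSSAPIClient struct{}

func (fakeGSSAPIClient) InitSecContext(target string, token []byte) ([]byte, bool, error) {
	return nil, false, nil // a real client would return a Kerberos token here
}

func (fakeGSSAPIClient) NegotiateSaslAuth(token []byte, authzid string) ([]byte, error) {
	return nil, nil // a real client would handle the SASL negotiation token
}

func (fakeGSSAPIClient) DeleteSecContext() error { return nil }

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.com:389") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// With a real client this loops through InitSecContext/NegotiateSaslAuth
	// until the SASL handshake completes.
	if err := conn.GSSAPIBind(fakeGSSAPIClient{}, "ldap/ldap.example.com", ""); err != nil {
		log.Fatal(err)
	}
}
```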
|
||||
|
||||
// GSSAPIBindRequest performs the GSSAPI SASL bind using the provided GSSAPI client.
|
||||
func (l *Conn) GSSAPIBindRequest(client GSSAPIClient, req *GSSAPIBindRequest) error {
|
||||
//nolint:errcheck
|
||||
defer client.DeleteSecContext()
|
||||
|
||||
var err error
|
||||
var reqToken []byte
|
||||
var recvToken []byte
|
||||
needInit := true
|
||||
for {
|
||||
if needInit {
|
||||
// Establish secure context between client and server.
|
||||
reqToken, needInit, err = client.InitSecContext(req.ServicePrincipalName, recvToken)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Secure context is set up, perform the last step of SASL handshake.
|
||||
reqToken, err = client.NegotiateSaslAuth(recvToken, req.AuthZID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Send Bind request containing the current token and extract the
|
||||
// token sent by server.
|
||||
recvToken, err = l.saslBindTokenExchange(req.Controls, reqToken)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !needInit && len(recvToken) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *Conn) saslBindTokenExchange(reqControls []Control, reqToken []byte) ([]byte, error) {
|
||||
// Construct LDAP Bind request with GSSAPI SASL mechanism.
|
||||
envelope := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
|
||||
envelope.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
|
||||
|
||||
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
|
||||
request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
|
||||
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
|
||||
|
||||
auth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication")
|
||||
auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "GSSAPI", "SASL Mech"))
|
||||
if len(reqToken) > 0 {
|
||||
auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(reqToken), "Credentials"))
|
||||
}
|
||||
request.AppendChild(auth)
|
||||
envelope.AppendChild(request)
|
||||
if len(reqControls) > 0 {
|
||||
envelope.AppendChild(encodeControls(reqControls))
|
||||
}
|
||||
|
||||
msgCtx, err := l.sendMessage(envelope)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
|
||||
if l.Debug {
|
||||
if err = addLDAPDescriptions(packet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ber.PrintPacket(packet)
|
||||
}
|
||||
|
||||
// https://www.rfc-editor.org/rfc/rfc4511#section-4.1.1
|
||||
// packet is an envelope
|
||||
// child 0 is message id
|
||||
// child 1 is protocolOp
|
||||
if len(packet.Children) != 2 {
|
||||
return nil, fmt.Errorf("bad bind response")
|
||||
}
|
||||
|
||||
protocolOp := packet.Children[1]
|
||||
RESP:
|
||||
switch protocolOp.Description {
|
||||
case "Bind Response": // Bind Response
|
||||
// Bind Response is an LDAP Response (https://www.rfc-editor.org/rfc/rfc4511#section-4.1.9)
|
||||
// with an additional optional serverSaslCreds string (https://www.rfc-editor.org/rfc/rfc4511#section-4.2.2)
|
||||
// child 0 is resultCode
|
||||
resultCode := protocolOp.Children[0]
|
||||
if resultCode.Tag != ber.TagEnumerated {
|
||||
break RESP
|
||||
}
|
||||
switch resultCode.Value.(int64) {
|
||||
case 14: // Sasl bind in progress
|
||||
if len(protocolOp.Children) < 4 {
|
||||
break RESP
|
||||
}
|
||||
referral := protocolOp.Children[3]
|
||||
switch referral.Description {
|
||||
case "Referral":
|
||||
if referral.ClassType != ber.ClassContext || referral.Tag != ber.TagObjectDescriptor {
|
||||
break RESP
|
||||
}
|
||||
return ioutil.ReadAll(referral.Data)
|
||||
}
|
||||
// Optional:
|
||||
//if len(protocolOp.Children) == 4 {
|
||||
// serverSaslCreds := protocolOp.Children[4]
|
||||
//}
|
||||
case 0: // Success - Bind OK.
|
||||
// SASL layer in effect (if any) (See https://www.rfc-editor.org/rfc/rfc4513#section-5.2.1.4)
|
||||
// NOTE: SASL security layers are not supported currently.
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, GetLDAPError(packet)
|
||||
}
|
||||
41
vendor/github.com/go-ldap/ldap/v3/client.go
generated
vendored
Normal file
41
vendor/github.com/go-ldap/ldap/v3/client.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Client knows how to interact with an LDAP server
|
||||
type Client interface {
|
||||
Start()
|
||||
StartTLS(*tls.Config) error
|
||||
Close() error
|
||||
GetLastError() error
|
||||
IsClosing() bool
|
||||
SetTimeout(time.Duration)
|
||||
TLSConnectionState() (tls.ConnectionState, bool)
|
||||
|
||||
Bind(username, password string) error
|
||||
UnauthenticatedBind(username string) error
|
||||
SimpleBind(*SimpleBindRequest) (*SimpleBindResult, error)
|
||||
ExternalBind() error
|
||||
NTLMUnauthenticatedBind(domain, username string) error
|
||||
Unbind() error
|
||||
|
||||
Add(*AddRequest) error
|
||||
Del(*DelRequest) error
|
||||
Modify(*ModifyRequest) error
|
||||
ModifyDN(*ModifyDNRequest) error
|
||||
ModifyWithResult(*ModifyRequest) (*ModifyResult, error)
|
||||
|
||||
Compare(dn, attribute, value string) (bool, error)
|
||||
PasswordModify(*PasswordModifyRequest) (*PasswordModifyResult, error)
|
||||
|
||||
Search(*SearchRequest) (*SearchResult, error)
|
||||
SearchAsync(ctx context.Context, searchRequest *SearchRequest, bufferSize int) Response
|
||||
SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error)
|
||||
DirSync(searchRequest *SearchRequest, flags, maxAttrCount int64, cookie []byte) (*SearchResult, error)
|
||||
DirSyncAsync(ctx context.Context, searchRequest *SearchRequest, bufferSize int, flags, maxAttrCount int64, cookie []byte) Response
|
||||
Syncrepl(ctx context.Context, searchRequest *SearchRequest, bufferSize int, mode ControlSyncRequestMode, cookie []byte, reloadHint bool) Response
|
||||
}
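Because *Conn satisfies this interface (conn.go asserts `var _ Client = &Conn{}`), callers can depend on Client rather than *Conn, which keeps them easy to stub in tests; a minimal sketch with placeholder host and DN:
```
package main

import (
	"log"

	"github.com/go-ldap/ldap/v3"
)

// authenticate depends only on the Client interface, so a test can pass a
// stub implementation instead of a live *ldap.Conn.
func authenticate(c ldap.Client, dn, password string) error {
	return c.Bind(dn, password)
}

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.com:389") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := authenticate(conn, "cn=reader,dc=example,dc=com", "secret"); err != nil { // placeholder DN
		log.Fatal(err)
	}
}
```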
|
||||
62
vendor/github.com/go-ldap/ldap/v3/compare.go
generated
vendored
Normal file
62
vendor/github.com/go-ldap/ldap/v3/compare.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// CompareRequest represents an LDAP CompareRequest operation.
|
||||
type CompareRequest struct {
|
||||
DN string
|
||||
Attribute string
|
||||
Value string
|
||||
}
|
||||
|
||||
func (req *CompareRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request")
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
|
||||
|
||||
ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion")
|
||||
ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Attribute, "AttributeDesc"))
|
||||
ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Value, "AssertionValue"))
|
||||
|
||||
pkt.AppendChild(ava)
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compare checks whether the given attribute of the entry identified by dn matches value. It returns true if it
|
||||
// does, otherwise false, along with any error that occurred.
|
||||
func (l *Conn) Compare(dn, attribute, value string) (bool, error) {
|
||||
msgCtx, err := l.doRequest(&CompareRequest{
|
||||
DN: dn,
|
||||
Attribute: attribute,
|
||||
Value: value,
|
||||
})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationCompareResponse {
|
||||
err := GetLDAPError(packet)
|
||||
|
||||
switch {
|
||||
case IsErrorWithCode(err, LDAPResultCompareTrue):
|
||||
return true, nil
|
||||
case IsErrorWithCode(err, LDAPResultCompareFalse):
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return false, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag)
|
||||
}
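A minimal sketch of Compare; the host, DN, and attribute value are placeholders, and most servers expect a bind before the compare operation:
```
package main

import (
	"fmt"
	"log"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	conn, err := ldap.DialURL("ldap://ldap.example.com:389") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Bind("cn=reader,dc=example,dc=com", "secret"); err != nil { // placeholder credentials
		log.Fatal(err)
	}

	// true on compareTrue, false on compareFalse, error for anything else.
	ok, err := conn.Compare("cn=jdoe,ou=people,dc=example,dc=com", "mail", "jdoe@example.com")
	fmt.Println(ok, err)
}
```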
|
||||
629
vendor/github.com/go-ldap/ldap/v3/conn.go
generated
vendored
Normal file
629
vendor/github.com/go-ldap/ldap/v3/conn.go
generated
vendored
Normal file
@@ -0,0 +1,629 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
const (
|
||||
// MessageQuit causes the processMessages loop to exit
|
||||
MessageQuit = 0
|
||||
// MessageRequest sends a request to the server
|
||||
MessageRequest = 1
|
||||
// MessageResponse receives a response from the server
|
||||
MessageResponse = 2
|
||||
// MessageFinish indicates the client considers a particular message ID to be finished
|
||||
MessageFinish = 3
|
||||
// MessageTimeout indicates the client-specified timeout for a particular message ID has been reached
|
||||
MessageTimeout = 4
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultLdapPort default ldap port for pure TCP connection
|
||||
DefaultLdapPort = "389"
|
||||
// DefaultLdapsPort default ldap port for SSL connection
|
||||
DefaultLdapsPort = "636"
|
||||
)
|
||||
|
||||
// PacketResponse contains the packet or error encountered reading a response
|
||||
type PacketResponse struct {
|
||||
// Packet is the packet read from the server
|
||||
Packet *ber.Packet
|
||||
// Error is an error encountered while reading
|
||||
Error error
|
||||
}
|
||||
|
||||
// ReadPacket returns the packet or an error
|
||||
func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) {
|
||||
if (pr == nil) || (pr.Packet == nil && pr.Error == nil) {
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response"))
|
||||
}
|
||||
return pr.Packet, pr.Error
|
||||
}
|
||||
|
||||
type messageContext struct {
|
||||
id int64
|
||||
// close(done) should only be called from finishMessage()
|
||||
done chan struct{}
|
||||
// close(responses) should only be called from processMessages(), and only sent to from sendResponse()
|
||||
responses chan *PacketResponse
|
||||
}
|
||||
|
||||
// sendResponse should only be called within the processMessages() loop which
|
||||
// is also responsible for closing the responses channel.
|
||||
func (msgCtx *messageContext) sendResponse(packet *PacketResponse, timeout time.Duration) {
|
||||
timeoutCtx := context.Background()
|
||||
if timeout > 0 {
|
||||
var cancelFunc context.CancelFunc
|
||||
timeoutCtx, cancelFunc = context.WithTimeout(context.Background(), timeout)
|
||||
defer cancelFunc()
|
||||
}
|
||||
select {
|
||||
case msgCtx.responses <- packet:
|
||||
// Successfully sent packet to message handler.
|
||||
case <-msgCtx.done:
|
||||
// The request handler is done and will not receive more
|
||||
// packets.
|
||||
case <-timeoutCtx.Done():
|
||||
// The timeout was reached before the packet was sent.
|
||||
}
|
||||
}
|
||||
|
||||
type messagePacket struct {
|
||||
Op int
|
||||
MessageID int64
|
||||
Packet *ber.Packet
|
||||
Context *messageContext
|
||||
}
|
||||
|
||||
type sendMessageFlags uint
|
||||
|
||||
const (
|
||||
startTLS sendMessageFlags = 1 << iota
|
||||
)
|
||||
|
||||
// Conn represents an LDAP Connection
|
||||
type Conn struct {
|
||||
// requestTimeout is loaded atomically
|
||||
// so we need to ensure 64-bit alignment on 32-bit platforms.
|
||||
// https://github.com/go-ldap/ldap/pull/199
|
||||
requestTimeout int64
|
||||
conn net.Conn
|
||||
isTLS bool
|
||||
closing uint32
|
||||
closeErr atomic.Value
|
||||
isStartingTLS bool
|
||||
Debug debugging
|
||||
chanConfirm chan struct{}
|
||||
messageContexts map[int64]*messageContext
|
||||
chanMessage chan *messagePacket
|
||||
chanMessageID chan int64
|
||||
wgClose sync.WaitGroup
|
||||
outstandingRequests uint
|
||||
messageMutex sync.Mutex
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
var _ Client = &Conn{}
|
||||
|
||||
// DefaultTimeout is a package-level variable that sets the timeout value
|
||||
// used for the Dial and DialTLS methods.
|
||||
//
|
||||
// WARNING: since this is a package-level variable, setting this value from
|
||||
// multiple places will probably result in undesired behaviour.
|
||||
var DefaultTimeout = 60 * time.Second
|
||||
|
||||
// DialOpt configures DialContext.
|
||||
type DialOpt func(*DialContext)
|
||||
|
||||
// DialWithDialer updates net.Dialer in DialContext.
|
||||
func DialWithDialer(d *net.Dialer) DialOpt {
|
||||
return func(dc *DialContext) {
|
||||
dc.dialer = d
|
||||
}
|
||||
}
|
||||
|
||||
// DialWithTLSConfig updates tls.Config in DialContext.
|
||||
func DialWithTLSConfig(tc *tls.Config) DialOpt {
|
||||
return func(dc *DialContext) {
|
||||
dc.tlsConfig = tc
|
||||
}
|
||||
}
|
||||
|
||||
// DialWithTLSDialer is a wrapper for DialWithTLSConfig with the option to
|
||||
// specify a net.Dialer to, for example, define a timeout or a custom resolver.
|
||||
// @deprecated Use DialWithDialer and DialWithTLSConfig instead
|
||||
func DialWithTLSDialer(tlsConfig *tls.Config, dialer *net.Dialer) DialOpt {
|
||||
return func(dc *DialContext) {
|
||||
dc.tlsConfig = tlsConfig
|
||||
dc.dialer = dialer
|
||||
}
|
||||
}
|
||||
|
||||
// DialContext contains necessary parameters to dial the given ldap URL.
|
||||
type DialContext struct {
|
||||
dialer *net.Dialer
|
||||
tlsConfig *tls.Config
|
||||
}
|
||||
|
||||
func (dc *DialContext) dial(u *url.URL) (net.Conn, error) {
|
||||
if u.Scheme == "ldapi" {
|
||||
if u.Path == "" || u.Path == "/" {
|
||||
u.Path = "/var/run/slapd/ldapi"
|
||||
}
|
||||
return dc.dialer.Dial("unix", u.Path)
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
// we assume that the error is due to a missing port
|
||||
host = u.Host
|
||||
port = ""
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "cldap":
|
||||
if port == "" {
|
||||
port = DefaultLdapPort
|
||||
}
|
||||
return dc.dialer.Dial("udp", net.JoinHostPort(host, port))
|
||||
case "ldap":
|
||||
if port == "" {
|
||||
port = DefaultLdapPort
|
||||
}
|
||||
return dc.dialer.Dial("tcp", net.JoinHostPort(host, port))
|
||||
case "ldaps":
|
||||
if port == "" {
|
||||
port = DefaultLdapsPort
|
||||
}
|
||||
return tls.DialWithDialer(dc.dialer, "tcp", net.JoinHostPort(host, port), dc.tlsConfig)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Unknown scheme '%s'", u.Scheme)
|
||||
}
|
||||
|
||||
// Dial connects to the given address on the given network using net.Dial
|
||||
// and then returns a new Conn for the connection.
|
||||
// Deprecated: Use DialURL instead.
|
||||
func Dial(network, addr string) (*Conn, error) {
|
||||
c, err := net.DialTimeout(network, addr, DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorNetwork, err)
|
||||
}
|
||||
conn := NewConn(c, false)
|
||||
conn.Start()
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// DialTLS connects to the given address on the given network using tls.Dial
|
||||
// and then returns a new Conn for the connection.
|
||||
// Deprecated: Use DialURL instead.
|
||||
func DialTLS(network, addr string, config *tls.Config) (*Conn, error) {
|
||||
c, err := tls.DialWithDialer(&net.Dialer{Timeout: DefaultTimeout}, network, addr, config)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorNetwork, err)
|
||||
}
|
||||
conn := NewConn(c, true)
|
||||
conn.Start()
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// DialURL connects to the given ldap URL.
|
||||
// The following schemes are supported: ldap://, ldaps://, ldapi://,
|
||||
// and cldap:// (RFC1798, deprecated but used by Active Directory).
|
||||
// On success a new Conn for the connection is returned.
|
||||
func DialURL(addr string, opts ...DialOpt) (*Conn, error) {
|
||||
u, err := url.Parse(addr)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorNetwork, err)
|
||||
}
|
||||
|
||||
var dc DialContext
|
||||
for _, opt := range opts {
|
||||
opt(&dc)
|
||||
}
|
||||
if dc.dialer == nil {
|
||||
dc.dialer = &net.Dialer{Timeout: DefaultTimeout}
|
||||
}
|
||||
|
||||
c, err := dc.dial(u)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorNetwork, err)
|
||||
}
|
||||
|
||||
conn := NewConn(c, u.Scheme == "ldaps")
|
||||
conn.Start()
|
||||
return conn, nil
|
||||
}
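
// Illustrative sketch, not part of the vendored library: how DialURL is
// typically combined with the DialOpt helpers above. The server address and
// the 10-second dial timeout are assumptions made up for this example; only
// identifiers declared in this file plus the standard net, crypto/tls and
// time packages are used.
func exampleDialURL() (*Conn, error) {
	// Dial an ldaps:// URL with an explicit dialer timeout and TLS config.
	return DialURL(
		"ldaps://ldap.example.com:636",
		DialWithDialer(&net.Dialer{Timeout: 10 * time.Second}),
		DialWithTLSConfig(&tls.Config{ServerName: "ldap.example.com"}),
	)
}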
|
||||
|
||||
// NewConn returns a new Conn using conn for network I/O.
|
||||
func NewConn(conn net.Conn, isTLS bool) *Conn {
|
||||
l := &Conn{
|
||||
conn: conn,
|
||||
chanConfirm: make(chan struct{}),
|
||||
chanMessageID: make(chan int64),
|
||||
chanMessage: make(chan *messagePacket, 10),
|
||||
messageContexts: map[int64]*messageContext{},
|
||||
requestTimeout: 0,
|
||||
isTLS: isTLS,
|
||||
}
|
||||
l.wgClose.Add(1)
|
||||
return l
|
||||
}
|
||||
|
||||
// Start initializes goroutines to read responses and process messages
|
||||
func (l *Conn) Start() {
|
||||
go l.reader()
|
||||
go l.processMessages()
|
||||
}
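
// Illustrative sketch, not part of the vendored library: wrapping an already
// established net.Conn (for example one obtained from a custom proxy dialer,
// an assumption made for this example) with NewConn. Start must be called
// before the connection is used, exactly as Dial, DialTLS and DialURL do above.
func exampleWrapExistingConn(raw net.Conn, alreadyTLS bool) *Conn {
	conn := NewConn(raw, alreadyTLS)
	conn.Start() // spawns the reader and processMessages goroutines
	return conn
}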
|
||||
|
||||
// IsClosing returns whether or not we're currently closing.
|
||||
func (l *Conn) IsClosing() bool {
|
||||
return atomic.LoadUint32(&l.closing) == 1
|
||||
}
|
||||
|
||||
// setClosing sets the closing value to true
|
||||
func (l *Conn) setClosing() bool {
|
||||
return atomic.CompareAndSwapUint32(&l.closing, 0, 1)
|
||||
}
|
||||
|
||||
// Close closes the connection.
|
||||
func (l *Conn) Close() (err error) {
|
||||
l.messageMutex.Lock()
|
||||
defer l.messageMutex.Unlock()
|
||||
|
||||
if l.setClosing() {
|
||||
l.Debug.Printf("Sending quit message and waiting for confirmation")
|
||||
l.chanMessage <- &messagePacket{Op: MessageQuit}
|
||||
|
||||
timeoutCtx := context.Background()
|
||||
if l.getTimeout() > 0 {
|
||||
var cancelFunc context.CancelFunc
|
||||
timeoutCtx, cancelFunc = context.WithTimeout(timeoutCtx, time.Duration(l.getTimeout()))
|
||||
defer cancelFunc()
|
||||
}
|
||||
select {
|
||||
case <-l.chanConfirm:
|
||||
// Confirmation was received.
|
||||
case <-timeoutCtx.Done():
|
||||
// The timeout was reached before confirmation was received.
|
||||
}
|
||||
|
||||
close(l.chanMessage)
|
||||
|
||||
l.Debug.Printf("Closing network connection")
|
||||
err = l.conn.Close()
|
||||
l.wgClose.Done()
|
||||
}
|
||||
l.wgClose.Wait()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// SetTimeout sets how long to wait after a request is sent before a MessageTimeout triggers
|
||||
func (l *Conn) SetTimeout(timeout time.Duration) {
|
||||
atomic.StoreInt64(&l.requestTimeout, int64(timeout))
|
||||
}
|
||||
|
||||
func (l *Conn) getTimeout() int64 {
|
||||
return atomic.LoadInt64(&l.requestTimeout)
|
||||
}
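
// Illustrative sketch, not part of the vendored library: bounding how long a
// single request may wait for its response. Once SetTimeout is called, a
// request that receives no reply within the duration is answered with an
// ErrorNetwork "connection timed out" error by the MessageTimeout branch of
// processMessages below. The 30-second value is an assumption for the example.
func exampleSetRequestTimeout(l *Conn) {
	l.SetTimeout(30 * time.Second)
}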
|
||||
|
||||
// Returns the next available messageID
|
||||
func (l *Conn) nextMessageID() int64 {
|
||||
if messageID, ok := <-l.chanMessageID; ok {
|
||||
return messageID
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// GetLastError returns the last recorded error from goroutines like processMessages and reader.
|
||||
// Only the last recorded error will be returned.
|
||||
func (l *Conn) GetLastError() error {
|
||||
l.messageMutex.Lock()
|
||||
defer l.messageMutex.Unlock()
|
||||
return l.err
|
||||
}
|
||||
|
||||
// StartTLS sends the command to start a TLS session and then creates a new TLS Client
|
||||
func (l *Conn) StartTLS(config *tls.Config) error {
|
||||
if l.isTLS {
|
||||
return NewError(ErrorNetwork, errors.New("ldap: already encrypted"))
|
||||
}
|
||||
|
||||
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
|
||||
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
|
||||
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS")
|
||||
request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command"))
|
||||
packet.AppendChild(request)
|
||||
l.Debug.PrintPacket(packet)
|
||||
|
||||
msgCtx, err := l.sendMessageWithFlags(packet, startTLS)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
l.Debug.Printf("%d: waiting for response", msgCtx.id)
|
||||
|
||||
packetResponse, ok := <-msgCtx.responses
|
||||
if !ok {
|
||||
return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
|
||||
}
|
||||
packet, err = packetResponse.ReadPacket()
|
||||
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if l.Debug {
|
||||
if err := addLDAPDescriptions(packet); err != nil {
|
||||
l.Close()
|
||||
return err
|
||||
}
|
||||
l.Debug.PrintPacket(packet)
|
||||
}
|
||||
|
||||
if err := GetLDAPError(packet); err == nil {
|
||||
conn := tls.Client(l.conn, config)
|
||||
|
||||
if connErr := conn.Handshake(); connErr != nil {
|
||||
l.Close()
|
||||
return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", connErr))
|
||||
}
|
||||
|
||||
l.isTLS = true
|
||||
l.conn = conn
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
go l.reader()
|
||||
|
||||
return nil
|
||||
}
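
// Illustrative sketch, not part of the vendored library: upgrading a plain
// ldap:// connection to TLS with StartTLS before sending credentials. The
// host name and the pinned ServerName are assumptions made for this example.
func exampleStartTLS() error {
	conn, err := DialURL("ldap://ldap.example.com:389")
	if err != nil {
		return err
	}
	defer conn.Close()

	// Negotiate TLS on the already established connection.
	return conn.StartTLS(&tls.Config{ServerName: "ldap.example.com"})
}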
|
||||
|
||||
// TLSConnectionState returns the client's TLS connection state.
|
||||
// The return values are their zero values if StartTLS did
|
||||
// not succeed.
|
||||
func (l *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) {
|
||||
tc, ok := l.conn.(*tls.Conn)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
return tc.ConnectionState(), true
|
||||
}
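
// Illustrative sketch, not part of the vendored library: inspecting the
// negotiated TLS parameters after a successful StartTLS or an ldaps:// dial.
// The boolean result distinguishes "no TLS in use" from a zero-valued state.
func exampleInspectTLS(l *Conn) (tls.ConnectionState, bool) {
	state, ok := l.TLSConnectionState()
	// state.Version and state.CipherSuite are only meaningful when ok is true.
	return state, ok
}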
|
||||
|
||||
func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) {
|
||||
return l.sendMessageWithFlags(packet, 0)
|
||||
}
|
||||
|
||||
func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) {
|
||||
if l.IsClosing() {
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
|
||||
}
|
||||
l.messageMutex.Lock()
|
||||
l.Debug.Printf("flags&startTLS = %d", flags&startTLS)
|
||||
if l.isStartingTLS {
|
||||
l.messageMutex.Unlock()
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase"))
|
||||
}
|
||||
if flags&startTLS != 0 {
|
||||
if l.outstandingRequests != 0 {
|
||||
l.messageMutex.Unlock()
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests"))
|
||||
}
|
||||
l.isStartingTLS = true
|
||||
}
|
||||
l.outstandingRequests++
|
||||
|
||||
l.messageMutex.Unlock()
|
||||
|
||||
responses := make(chan *PacketResponse)
|
||||
messageID := packet.Children[0].Value.(int64)
|
||||
message := &messagePacket{
|
||||
Op: MessageRequest,
|
||||
MessageID: messageID,
|
||||
Packet: packet,
|
||||
Context: &messageContext{
|
||||
id: messageID,
|
||||
done: make(chan struct{}),
|
||||
responses: responses,
|
||||
},
|
||||
}
|
||||
if !l.sendProcessMessage(message) {
|
||||
if l.IsClosing() {
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
|
||||
}
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: could not send message for unknown reason"))
|
||||
}
|
||||
return message.Context, nil
|
||||
}
|
||||
|
||||
func (l *Conn) finishMessage(msgCtx *messageContext) {
|
||||
close(msgCtx.done)
|
||||
|
||||
if l.IsClosing() {
|
||||
return
|
||||
}
|
||||
|
||||
l.messageMutex.Lock()
|
||||
l.outstandingRequests--
|
||||
if l.isStartingTLS {
|
||||
l.isStartingTLS = false
|
||||
}
|
||||
l.messageMutex.Unlock()
|
||||
|
||||
message := &messagePacket{
|
||||
Op: MessageFinish,
|
||||
MessageID: msgCtx.id,
|
||||
}
|
||||
l.sendProcessMessage(message)
|
||||
}
|
||||
|
||||
func (l *Conn) sendProcessMessage(message *messagePacket) bool {
|
||||
l.messageMutex.Lock()
|
||||
defer l.messageMutex.Unlock()
|
||||
if l.IsClosing() {
|
||||
return false
|
||||
}
|
||||
l.chanMessage <- message
|
||||
return true
|
||||
}
|
||||
|
||||
func (l *Conn) processMessages() {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
l.err = fmt.Errorf("ldap: recovered panic in processMessages: %v", err)
|
||||
}
|
||||
for messageID, msgCtx := range l.messageContexts {
|
||||
// If we are closing due to an error, inform anyone who
|
||||
// is waiting about the error.
|
||||
if l.IsClosing() && l.closeErr.Load() != nil {
|
||||
msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)}, time.Duration(l.getTimeout()))
|
||||
}
|
||||
l.Debug.Printf("Closing channel for MessageID %d", messageID)
|
||||
close(msgCtx.responses)
|
||||
delete(l.messageContexts, messageID)
|
||||
}
|
||||
close(l.chanMessageID)
|
||||
close(l.chanConfirm)
|
||||
}()
|
||||
|
||||
var messageID int64 = 1
|
||||
for {
|
||||
select {
|
||||
case l.chanMessageID <- messageID:
|
||||
messageID++
|
||||
case message := <-l.chanMessage:
|
||||
switch message.Op {
|
||||
case MessageQuit:
|
||||
l.Debug.Printf("Shutting down - quit message received")
|
||||
return
|
||||
case MessageRequest:
|
||||
// Add to message list and write to network
|
||||
l.Debug.Printf("Sending message %d", message.MessageID)
|
||||
|
||||
buf := message.Packet.Bytes()
|
||||
_, err := l.conn.Write(buf)
|
||||
if err != nil {
|
||||
l.Debug.Printf("Error Sending Message: %s", err.Error())
|
||||
message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)}, time.Duration(l.getTimeout()))
|
||||
close(message.Context.responses)
|
||||
break
|
||||
}
|
||||
|
||||
// Only add to messageContexts if we were able to
|
||||
// successfully write the message.
|
||||
l.messageContexts[message.MessageID] = message.Context
|
||||
|
||||
// Add timeout if defined
|
||||
requestTimeout := l.getTimeout()
|
||||
if requestTimeout > 0 {
|
||||
go func() {
|
||||
timer := time.NewTimer(time.Duration(requestTimeout))
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
l.err = fmt.Errorf("ldap: recovered panic in RequestTimeout: %v", err)
|
||||
}
|
||||
|
||||
timer.Stop()
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-timer.C:
|
||||
timeoutMessage := &messagePacket{
|
||||
Op: MessageTimeout,
|
||||
MessageID: message.MessageID,
|
||||
}
|
||||
l.sendProcessMessage(timeoutMessage)
|
||||
case <-message.Context.done:
|
||||
}
|
||||
}()
|
||||
}
|
||||
case MessageResponse:
|
||||
l.Debug.Printf("Receiving message %d", message.MessageID)
|
||||
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
|
||||
msgCtx.sendResponse(&PacketResponse{message.Packet, nil}, time.Duration(l.getTimeout()))
|
||||
} else {
|
||||
l.err = fmt.Errorf("ldap: received unexpected message %d, %v", message.MessageID, l.IsClosing())
|
||||
l.Debug.PrintPacket(message.Packet)
|
||||
}
|
||||
case MessageTimeout:
|
||||
// Handle the timeout by closing the channel
|
||||
// All reads will return immediately
|
||||
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
|
||||
l.Debug.Printf("Receiving message timeout for %d", message.MessageID)
|
||||
msgCtx.sendResponse(&PacketResponse{message.Packet, NewError(ErrorNetwork, errors.New("ldap: connection timed out"))}, time.Duration(l.getTimeout()))
|
||||
delete(l.messageContexts, message.MessageID)
|
||||
close(msgCtx.responses)
|
||||
}
|
||||
case MessageFinish:
|
||||
l.Debug.Printf("Finished message %d", message.MessageID)
|
||||
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
|
||||
delete(l.messageContexts, message.MessageID)
|
||||
close(msgCtx.responses)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Conn) reader() {
|
||||
cleanstop := false
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
l.err = fmt.Errorf("ldap: recovered panic in reader: %v", err)
|
||||
}
|
||||
if !cleanstop {
|
||||
l.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
bufConn := bufio.NewReader(l.conn)
|
||||
for {
|
||||
if cleanstop {
|
||||
l.Debug.Printf("reader clean stopping (without closing the connection)")
|
||||
return
|
||||
}
|
||||
packet, err := ber.ReadPacket(bufConn)
|
||||
if err != nil {
|
||||
// A read error is expected here if we are closing the connection...
|
||||
if !l.IsClosing() {
|
||||
l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err))
|
||||
l.Debug.Printf("reader error: %s", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if err := addLDAPDescriptions(packet); err != nil {
|
||||
l.Debug.Printf("descriptions error: %s", err)
|
||||
}
|
||||
if len(packet.Children) == 0 {
|
||||
l.Debug.Printf("Received bad ldap packet")
|
||||
continue
|
||||
}
|
||||
l.messageMutex.Lock()
|
||||
if l.isStartingTLS {
|
||||
cleanstop = true
|
||||
}
|
||||
l.messageMutex.Unlock()
|
||||
message := &messagePacket{
|
||||
Op: MessageResponse,
|
||||
MessageID: packet.Children[0].Value.(int64),
|
||||
Packet: packet,
|
||||
}
|
||||
if !l.sendProcessMessage(message) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
1294
vendor/github.com/go-ldap/ldap/v3/control.go
generated
vendored
Normal file
File diff suppressed because it is too large
28
vendor/github.com/go-ldap/ldap/v3/debug.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// debugging type
|
||||
// - has a Printf method to write the debug output
|
||||
type debugging bool
|
||||
|
||||
// Enable controls debugging mode.
|
||||
func (debug *debugging) Enable(b bool) {
|
||||
*debug = debugging(b)
|
||||
}
|
||||
|
||||
// Printf writes debug output.
|
||||
func (debug debugging) Printf(format string, args ...interface{}) {
|
||||
if debug {
|
||||
logger.Printf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// PrintPacket dumps a packet.
|
||||
func (debug debugging) PrintPacket(packet *ber.Packet) {
|
||||
if debug {
|
||||
ber.WritePacket(logger.Writer(), packet)
|
||||
}
|
||||
}
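
// Illustrative sketch, not part of the vendored library: enabling packet-level
// debug output for a connection. Debug is an exported field of Conn (declared
// in conn.go), so callers can toggle it directly; output goes through the
// package-level logger used by Printf and PrintPacket above.
func exampleEnableDebug(l *Conn) {
	l.Debug.Enable(true)
}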
|
||||
59
vendor/github.com/go-ldap/ldap/v3/del.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// DelRequest implements an LDAP deletion request
|
||||
type DelRequest struct {
|
||||
// DN is the name of the directory entry to delete
|
||||
DN string
|
||||
// Controls hold optional controls to send with the request
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
func (req *DelRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, req.DN, "Del Request")
|
||||
pkt.Data.Write([]byte(req.DN))
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
if len(req.Controls) > 0 {
|
||||
envelope.AppendChild(encodeControls(req.Controls))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewDelRequest creates a delete request for the given DN and controls
|
||||
func NewDelRequest(DN string, Controls []Control) *DelRequest {
|
||||
return &DelRequest{
|
||||
DN: DN,
|
||||
Controls: Controls,
|
||||
}
|
||||
}
|
||||
|
||||
// Del executes the given delete request
|
||||
func (l *Conn) Del(delRequest *DelRequest) error {
|
||||
msgCtx, err := l.doRequest(delRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationDelResponse {
|
||||
err := GetLDAPError(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("ldap: unexpected response: %d", packet.Children[1].Tag)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
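
// Illustrative sketch, not part of the vendored library: building and running
// a delete. The DN is an assumption made for this example; passing nil for
// Controls means the request carries no controls.
func exampleDelete(l *Conn) error {
	req := NewDelRequest("cn=old-service,ou=accounts,dc=example,dc=com", nil)
	if err := l.Del(req); err != nil {
		return fmt.Errorf("delete failed: %w", err)
	}
	return nil
}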
|
||||
350
vendor/github.com/go-ldap/ldap/v3/dn.go
generated
vendored
Normal file
@@ -0,0 +1,350 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
enchex "encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514
|
||||
type AttributeTypeAndValue struct {
|
||||
// Type is the attribute type
|
||||
Type string
|
||||
// Value is the attribute value
|
||||
Value string
|
||||
}
|
||||
|
||||
// String returns a normalized string representation of this attribute type and
|
||||
// value pair: the lowercased Type and the escaped Value joined with a "=".
|
||||
func (a *AttributeTypeAndValue) String() string {
|
||||
return strings.ToLower(a.Type) + "=" + a.encodeValue()
|
||||
}
|
||||
|
||||
func (a *AttributeTypeAndValue) encodeValue() string {
|
||||
// Normalize the value first.
|
||||
// value := strings.ToLower(a.Value)
|
||||
value := a.Value
|
||||
|
||||
encodedBuf := bytes.Buffer{}
|
||||
|
||||
escapeChar := func(c byte) {
|
||||
encodedBuf.WriteByte('\\')
|
||||
encodedBuf.WriteByte(c)
|
||||
}
|
||||
|
||||
escapeHex := func(c byte) {
|
||||
encodedBuf.WriteByte('\\')
|
||||
encodedBuf.WriteString(enchex.EncodeToString([]byte{c}))
|
||||
}
|
||||
|
||||
for i := 0; i < len(value); i++ {
|
||||
char := value[i]
|
||||
if i == 0 && char == ' ' || char == '#' {
|
||||
// Special case leading space or number sign.
|
||||
escapeChar(char)
|
||||
continue
|
||||
}
|
||||
if i == len(value)-1 && char == ' ' {
|
||||
// Special case trailing space.
|
||||
escapeChar(char)
|
||||
continue
|
||||
}
|
||||
|
||||
switch char {
|
||||
case '"', '+', ',', ';', '<', '>', '\\':
|
||||
// Each of these special characters must be escaped.
|
||||
escapeChar(char)
|
||||
continue
|
||||
}
|
||||
|
||||
if char < ' ' || char > '~' {
|
||||
// All special character escapes are handled first
|
||||
// above. All bytes less than ASCII SPACE and all bytes
|
||||
// greater than ASCII TILDE must be hex-escaped.
|
||||
escapeHex(char)
|
||||
continue
|
||||
}
|
||||
|
||||
// Any other character does not require escaping.
|
||||
encodedBuf.WriteByte(char)
|
||||
}
|
||||
|
||||
return encodedBuf.String()
|
||||
}
|
||||
|
||||
// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514
|
||||
type RelativeDN struct {
|
||||
Attributes []*AttributeTypeAndValue
|
||||
}
|
||||
|
||||
// String returns a normalized string representation of this relative DN which
|
||||
// is a join of all attributes (sorted in increasing order) with a "+".
|
||||
func (r *RelativeDN) String() string {
|
||||
attrs := make([]string, len(r.Attributes))
|
||||
for i := range r.Attributes {
|
||||
attrs[i] = r.Attributes[i].String()
|
||||
}
|
||||
sort.Strings(attrs)
|
||||
return strings.Join(attrs, "+")
|
||||
}
|
||||
|
||||
// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514
|
||||
type DN struct {
|
||||
RDNs []*RelativeDN
|
||||
}
|
||||
|
||||
// String returns a normalized string representation of this DN which is the
|
||||
// join of all relative DNs with a ",".
|
||||
func (d *DN) String() string {
|
||||
rdns := make([]string, len(d.RDNs))
|
||||
for i := range d.RDNs {
|
||||
rdns[i] = d.RDNs[i].String()
|
||||
}
|
||||
return strings.Join(rdns, ",")
|
||||
}
|
||||
|
||||
// ParseDN returns a distinguishedName or an error.
|
||||
// The function respects https://tools.ietf.org/html/rfc4514
|
||||
func ParseDN(str string) (*DN, error) {
|
||||
dn := new(DN)
|
||||
dn.RDNs = make([]*RelativeDN, 0)
|
||||
rdn := new(RelativeDN)
|
||||
rdn.Attributes = make([]*AttributeTypeAndValue, 0)
|
||||
buffer := bytes.Buffer{}
|
||||
attribute := new(AttributeTypeAndValue)
|
||||
escaping := false
|
||||
|
||||
unescapedTrailingSpaces := 0
|
||||
stringFromBuffer := func() string {
|
||||
s := buffer.String()
|
||||
s = s[0 : len(s)-unescapedTrailingSpaces]
|
||||
buffer.Reset()
|
||||
unescapedTrailingSpaces = 0
|
||||
return s
|
||||
}
|
||||
|
||||
for i := 0; i < len(str); i++ {
|
||||
char := str[i]
|
||||
switch {
|
||||
case escaping:
|
||||
unescapedTrailingSpaces = 0
|
||||
escaping = false
|
||||
switch char {
|
||||
case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\':
|
||||
buffer.WriteByte(char)
|
||||
continue
|
||||
}
|
||||
// Not a special character, assume hex encoded octet
|
||||
if len(str) == i+1 {
|
||||
return nil, errors.New("got corrupted escaped character")
|
||||
}
|
||||
|
||||
dst := []byte{0}
|
||||
n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2]))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode escaped character: %s", err)
|
||||
} else if n != 1 {
|
||||
return nil, fmt.Errorf("expected 1 byte when un-escaping, got %d", n)
|
||||
}
|
||||
buffer.WriteByte(dst[0])
|
||||
i++
|
||||
case char == '\\':
|
||||
unescapedTrailingSpaces = 0
|
||||
escaping = true
|
||||
case char == '=' && attribute.Type == "":
|
||||
attribute.Type = stringFromBuffer()
|
||||
// Special case: If the first character in the value is # the
|
||||
// following data is BER encoded so we can just fast forward
|
||||
// and decode.
|
||||
if len(str) > i+1 && str[i+1] == '#' {
|
||||
i += 2
|
||||
index := strings.IndexAny(str[i:], ",+")
|
||||
var data string
|
||||
if index > 0 {
|
||||
data = str[i : i+index]
|
||||
} else {
|
||||
data = str[i:]
|
||||
}
|
||||
rawBER, err := enchex.DecodeString(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode BER encoding: %s", err)
|
||||
}
|
||||
packet, err := ber.DecodePacketErr(rawBER)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode BER packet: %s", err)
|
||||
}
|
||||
buffer.WriteString(packet.Data.String())
|
||||
i += len(data) - 1
|
||||
}
|
||||
case char == ',' || char == '+' || char == ';':
|
||||
// We're done with this RDN or value, push it
|
||||
if len(attribute.Type) == 0 {
|
||||
return nil, errors.New("incomplete type, value pair")
|
||||
}
|
||||
attribute.Value = stringFromBuffer()
|
||||
rdn.Attributes = append(rdn.Attributes, attribute)
|
||||
attribute = new(AttributeTypeAndValue)
|
||||
if char == ',' || char == ';' {
|
||||
dn.RDNs = append(dn.RDNs, rdn)
|
||||
rdn = new(RelativeDN)
|
||||
rdn.Attributes = make([]*AttributeTypeAndValue, 0)
|
||||
}
|
||||
case char == ' ' && buffer.Len() == 0:
|
||||
// ignore unescaped leading spaces
|
||||
continue
|
||||
default:
|
||||
if char == ' ' {
|
||||
// Track unescaped spaces in case they are trailing and we need to remove them
|
||||
unescapedTrailingSpaces++
|
||||
} else {
|
||||
// Reset if we see a non-space char
|
||||
unescapedTrailingSpaces = 0
|
||||
}
|
||||
buffer.WriteByte(char)
|
||||
}
|
||||
}
|
||||
if buffer.Len() > 0 {
|
||||
if len(attribute.Type) == 0 {
|
||||
return nil, errors.New("DN ended with incomplete type, value pair")
|
||||
}
|
||||
attribute.Value = stringFromBuffer()
|
||||
rdn.Attributes = append(rdn.Attributes, attribute)
|
||||
dn.RDNs = append(dn.RDNs, rdn)
|
||||
}
|
||||
return dn, nil
|
||||
}
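
// Illustrative sketch, not part of the vendored library: parsing a DN string
// into its RDN components. The input DN is an assumption for this example;
// the escaped comma is part of the value, not an RDN separator.
func exampleParseDN() (*DN, error) {
	dn, err := ParseDN("CN=Smith\\, John,OU=People,DC=example,DC=com")
	if err != nil {
		return nil, err
	}
	// dn.String() renders the normalized form with attribute types lowercased:
	// "cn=Smith\, John,ou=People,dc=example,dc=com"
	return dn, nil
}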
|
||||
|
||||
// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
|
||||
// Returns true if they have the same number of relative distinguished names
|
||||
// and corresponding relative distinguished names (by position) are the same.
|
||||
func (d *DN) Equal(other *DN) bool {
|
||||
if len(d.RDNs) != len(other.RDNs) {
|
||||
return false
|
||||
}
|
||||
for i := range d.RDNs {
|
||||
if !d.RDNs[i].Equal(other.RDNs[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
|
||||
// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com"
|
||||
// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com"
|
||||
// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com"
|
||||
func (d *DN) AncestorOf(other *DN) bool {
|
||||
if len(d.RDNs) >= len(other.RDNs) {
|
||||
return false
|
||||
}
|
||||
// Take the last `len(d.RDNs)` RDNs from the other DN to compare against
|
||||
otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
|
||||
for i := range d.RDNs {
|
||||
if !d.RDNs[i].Equal(otherRDNs[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
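
// Illustrative sketch, not part of the vendored library: the case-sensitive
// comparisons defined above, using the DNs from the AncestorOf documentation.
// ParseDN errors are ignored here only to keep the sketch short.
func exampleCompareDNs() (equal, ancestor bool) {
	base, _ := ParseDN("ou=widgets,o=acme.com")
	leaf, _ := ParseDN("ou=sprockets,ou=widgets,o=acme.com")
	equal = base.Equal(leaf)         // false: different number of RDNs
	ancestor = base.AncestorOf(leaf) // true: leaf ends with all RDNs of base
	return equal, ancestor
}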
|
||||
|
||||
// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
|
||||
// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues
|
||||
// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type.
|
||||
// The order of attributes is not significant.
|
||||
// Case of attribute types is not significant.
|
||||
func (r *RelativeDN) Equal(other *RelativeDN) bool {
|
||||
if len(r.Attributes) != len(other.Attributes) {
|
||||
return false
|
||||
}
|
||||
return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes)
|
||||
}
|
||||
|
||||
func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool {
|
||||
for _, attr := range attrs {
|
||||
found := false
|
||||
for _, myattr := range r.Attributes {
|
||||
if myattr.Equal(attr) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
|
||||
// Case of the attribute type is not significant
|
||||
func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool {
|
||||
return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value
|
||||
}
|
||||
|
||||
// EqualFold returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
|
||||
// Returns true if they have the same number of relative distinguished names
|
||||
// and corresponding relative distinguished names (by position) are the same.
|
||||
// Case of the attribute type and value is not significant
|
||||
func (d *DN) EqualFold(other *DN) bool {
|
||||
if len(d.RDNs) != len(other.RDNs) {
|
||||
return false
|
||||
}
|
||||
for i := range d.RDNs {
|
||||
if !d.RDNs[i].EqualFold(other.RDNs[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// AncestorOfFold returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
|
||||
// Case of the attribute type and value is not significant
|
||||
func (d *DN) AncestorOfFold(other *DN) bool {
|
||||
if len(d.RDNs) >= len(other.RDNs) {
|
||||
return false
|
||||
}
|
||||
// Take the last `len(d.RDNs)` RDNs from the other DN to compare against
|
||||
otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
|
||||
for i := range d.RDNs {
|
||||
if !d.RDNs[i].EqualFold(otherRDNs[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EqualFold returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
|
||||
// Case of the attribute type is not significant
|
||||
func (r *RelativeDN) EqualFold(other *RelativeDN) bool {
|
||||
if len(r.Attributes) != len(other.Attributes) {
|
||||
return false
|
||||
}
|
||||
return r.hasAllAttributesFold(other.Attributes) && other.hasAllAttributesFold(r.Attributes)
|
||||
}
|
||||
|
||||
func (r *RelativeDN) hasAllAttributesFold(attrs []*AttributeTypeAndValue) bool {
|
||||
for _, attr := range attrs {
|
||||
found := false
|
||||
for _, myattr := range r.Attributes {
|
||||
if myattr.EqualFold(attr) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EqualFold returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
|
||||
// Case of the attribute type and value is not significant
|
||||
func (a *AttributeTypeAndValue) EqualFold(other *AttributeTypeAndValue) bool {
|
||||
return strings.EqualFold(a.Type, other.Type) && strings.EqualFold(a.Value, other.Value)
|
||||
}
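
// Illustrative sketch, not part of the vendored library: the *Fold variants
// ignore case in both attribute types and values. The sample DNs are
// assumptions made for this example.
func exampleCompareDNsFold() bool {
	a, _ := ParseDN("CN=Alice,DC=Example,DC=Com")
	b, _ := ParseDN("cn=alice,dc=example,dc=com")
	// Equal would return false here because the values differ in case;
	// EqualFold returns true.
	return a.EqualFold(b)
}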
|
||||
4
vendor/github.com/go-ldap/ldap/v3/doc.go
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
/*
|
||||
Package ldap provides basic LDAP v3 functionality.
|
||||
*/
|
||||
package ldap
|
||||
261
vendor/github.com/go-ldap/ldap/v3/error.go
generated
vendored
Normal file
@@ -0,0 +1,261 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// LDAP Result Codes
|
||||
const (
|
||||
LDAPResultSuccess = 0
|
||||
LDAPResultOperationsError = 1
|
||||
LDAPResultProtocolError = 2
|
||||
LDAPResultTimeLimitExceeded = 3
|
||||
LDAPResultSizeLimitExceeded = 4
|
||||
LDAPResultCompareFalse = 5
|
||||
LDAPResultCompareTrue = 6
|
||||
LDAPResultAuthMethodNotSupported = 7
|
||||
LDAPResultStrongAuthRequired = 8
|
||||
LDAPResultReferral = 10
|
||||
LDAPResultAdminLimitExceeded = 11
|
||||
LDAPResultUnavailableCriticalExtension = 12
|
||||
LDAPResultConfidentialityRequired = 13
|
||||
LDAPResultSaslBindInProgress = 14
|
||||
LDAPResultNoSuchAttribute = 16
|
||||
LDAPResultUndefinedAttributeType = 17
|
||||
LDAPResultInappropriateMatching = 18
|
||||
LDAPResultConstraintViolation = 19
|
||||
LDAPResultAttributeOrValueExists = 20
|
||||
LDAPResultInvalidAttributeSyntax = 21
|
||||
LDAPResultNoSuchObject = 32
|
||||
LDAPResultAliasProblem = 33
|
||||
LDAPResultInvalidDNSyntax = 34
|
||||
LDAPResultIsLeaf = 35
|
||||
LDAPResultAliasDereferencingProblem = 36
|
||||
LDAPResultInappropriateAuthentication = 48
|
||||
LDAPResultInvalidCredentials = 49
|
||||
LDAPResultInsufficientAccessRights = 50
|
||||
LDAPResultBusy = 51
|
||||
LDAPResultUnavailable = 52
|
||||
LDAPResultUnwillingToPerform = 53
|
||||
LDAPResultLoopDetect = 54
|
||||
LDAPResultSortControlMissing = 60
|
||||
LDAPResultOffsetRangeError = 61
|
||||
LDAPResultNamingViolation = 64
|
||||
LDAPResultObjectClassViolation = 65
|
||||
LDAPResultNotAllowedOnNonLeaf = 66
|
||||
LDAPResultNotAllowedOnRDN = 67
|
||||
LDAPResultEntryAlreadyExists = 68
|
||||
LDAPResultObjectClassModsProhibited = 69
|
||||
LDAPResultResultsTooLarge = 70
|
||||
LDAPResultAffectsMultipleDSAs = 71
|
||||
LDAPResultVirtualListViewErrorOrControlError = 76
|
||||
LDAPResultOther = 80
|
||||
LDAPResultServerDown = 81
|
||||
LDAPResultLocalError = 82
|
||||
LDAPResultEncodingError = 83
|
||||
LDAPResultDecodingError = 84
|
||||
LDAPResultTimeout = 85
|
||||
LDAPResultAuthUnknown = 86
|
||||
LDAPResultFilterError = 87
|
||||
LDAPResultUserCanceled = 88
|
||||
LDAPResultParamError = 89
|
||||
LDAPResultNoMemory = 90
|
||||
LDAPResultConnectError = 91
|
||||
LDAPResultNotSupported = 92
|
||||
LDAPResultControlNotFound = 93
|
||||
LDAPResultNoResultsReturned = 94
|
||||
LDAPResultMoreResultsToReturn = 95
|
||||
LDAPResultClientLoop = 96
|
||||
LDAPResultReferralLimitExceeded = 97
|
||||
LDAPResultInvalidResponse = 100
|
||||
LDAPResultAmbiguousResponse = 101
|
||||
LDAPResultTLSNotSupported = 112
|
||||
LDAPResultIntermediateResponse = 113
|
||||
LDAPResultUnknownType = 114
|
||||
LDAPResultCanceled = 118
|
||||
LDAPResultNoSuchOperation = 119
|
||||
LDAPResultTooLate = 120
|
||||
LDAPResultCannotCancel = 121
|
||||
LDAPResultAssertionFailed = 122
|
||||
LDAPResultAuthorizationDenied = 123
|
||||
LDAPResultSyncRefreshRequired = 4096
|
||||
|
||||
ErrorNetwork = 200
|
||||
ErrorFilterCompile = 201
|
||||
ErrorFilterDecompile = 202
|
||||
ErrorDebugging = 203
|
||||
ErrorUnexpectedMessage = 204
|
||||
ErrorUnexpectedResponse = 205
|
||||
ErrorEmptyPassword = 206
|
||||
)
|
||||
|
||||
// LDAPResultCodeMap contains string descriptions for LDAP error codes
|
||||
var LDAPResultCodeMap = map[uint16]string{
|
||||
LDAPResultSuccess: "Success",
|
||||
LDAPResultOperationsError: "Operations Error",
|
||||
LDAPResultProtocolError: "Protocol Error",
|
||||
LDAPResultTimeLimitExceeded: "Time Limit Exceeded",
|
||||
LDAPResultSizeLimitExceeded: "Size Limit Exceeded",
|
||||
LDAPResultCompareFalse: "Compare False",
|
||||
LDAPResultCompareTrue: "Compare True",
|
||||
LDAPResultAuthMethodNotSupported: "Auth Method Not Supported",
|
||||
LDAPResultStrongAuthRequired: "Strong Auth Required",
|
||||
LDAPResultReferral: "Referral",
|
||||
LDAPResultAdminLimitExceeded: "Admin Limit Exceeded",
|
||||
LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension",
|
||||
LDAPResultConfidentialityRequired: "Confidentiality Required",
|
||||
LDAPResultSaslBindInProgress: "Sasl Bind In Progress",
|
||||
LDAPResultNoSuchAttribute: "No Such Attribute",
|
||||
LDAPResultUndefinedAttributeType: "Undefined Attribute Type",
|
||||
LDAPResultInappropriateMatching: "Inappropriate Matching",
|
||||
LDAPResultConstraintViolation: "Constraint Violation",
|
||||
LDAPResultAttributeOrValueExists: "Attribute Or Value Exists",
|
||||
LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax",
|
||||
LDAPResultNoSuchObject: "No Such Object",
|
||||
LDAPResultAliasProblem: "Alias Problem",
|
||||
LDAPResultInvalidDNSyntax: "Invalid DN Syntax",
|
||||
LDAPResultIsLeaf: "Is Leaf",
|
||||
LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem",
|
||||
LDAPResultInappropriateAuthentication: "Inappropriate Authentication",
|
||||
LDAPResultInvalidCredentials: "Invalid Credentials",
|
||||
LDAPResultInsufficientAccessRights: "Insufficient Access Rights",
|
||||
LDAPResultBusy: "Busy",
|
||||
LDAPResultUnavailable: "Unavailable",
|
||||
LDAPResultUnwillingToPerform: "Unwilling To Perform",
|
||||
LDAPResultLoopDetect: "Loop Detect",
|
||||
LDAPResultSortControlMissing: "Sort Control Missing",
|
||||
LDAPResultOffsetRangeError: "Result Offset Range Error",
|
||||
LDAPResultNamingViolation: "Naming Violation",
|
||||
LDAPResultObjectClassViolation: "Object Class Violation",
|
||||
LDAPResultResultsTooLarge: "Results Too Large",
|
||||
LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf",
|
||||
LDAPResultNotAllowedOnRDN: "Not Allowed On RDN",
|
||||
LDAPResultEntryAlreadyExists: "Entry Already Exists",
|
||||
LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited",
|
||||
LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs",
|
||||
LDAPResultVirtualListViewErrorOrControlError: "Failed because of a problem related to the virtual list view",
|
||||
LDAPResultOther: "Other",
|
||||
LDAPResultServerDown: "Cannot establish a connection",
|
||||
LDAPResultLocalError: "An error occurred",
|
||||
LDAPResultEncodingError: "LDAP encountered an error while encoding",
|
||||
LDAPResultDecodingError: "LDAP encountered an error while decoding",
|
||||
LDAPResultTimeout: "LDAP timeout while waiting for a response from the server",
|
||||
LDAPResultAuthUnknown: "The auth method requested in a bind request is unknown",
|
||||
LDAPResultFilterError: "An error occurred while encoding the given search filter",
|
||||
LDAPResultUserCanceled: "The user canceled the operation",
|
||||
LDAPResultParamError: "An invalid parameter was specified",
|
||||
LDAPResultNoMemory: "Out of memory error",
|
||||
LDAPResultConnectError: "A connection to the server could not be established",
|
||||
LDAPResultNotSupported: "An attempt has been made to use a feature not supported by LDAP",
|
||||
LDAPResultControlNotFound: "The controls required to perform the requested operation were not found",
|
||||
LDAPResultNoResultsReturned: "No results were returned from the server",
|
||||
LDAPResultMoreResultsToReturn: "There are more results in the chain of results",
|
||||
LDAPResultClientLoop: "A loop has been detected. For example when following referrals",
|
||||
LDAPResultReferralLimitExceeded: "The referral hop limit has been exceeded",
|
||||
LDAPResultCanceled: "Operation was canceled",
|
||||
LDAPResultNoSuchOperation: "Server has no knowledge of the operation requested for cancellation",
|
||||
LDAPResultTooLate: "Too late to cancel the outstanding operation",
|
||||
LDAPResultCannotCancel: "The identified operation does not support cancellation or the cancel operation cannot be performed",
|
||||
LDAPResultAssertionFailed: "An assertion control given in the LDAP operation evaluated to false causing the operation to not be performed",
|
||||
LDAPResultSyncRefreshRequired: "Refresh Required",
|
||||
LDAPResultInvalidResponse: "Invalid Response",
|
||||
LDAPResultAmbiguousResponse: "Ambiguous Response",
|
||||
LDAPResultTLSNotSupported: "Tls Not Supported",
|
||||
LDAPResultIntermediateResponse: "Intermediate Response",
|
||||
LDAPResultUnknownType: "Unknown Type",
|
||||
LDAPResultAuthorizationDenied: "Authorization Denied",
|
||||
|
||||
ErrorNetwork: "Network Error",
|
||||
ErrorFilterCompile: "Filter Compile Error",
|
||||
ErrorFilterDecompile: "Filter Decompile Error",
|
||||
ErrorDebugging: "Debugging Error",
|
||||
ErrorUnexpectedMessage: "Unexpected Message",
|
||||
ErrorUnexpectedResponse: "Unexpected Response",
|
||||
ErrorEmptyPassword: "Empty password not allowed by the client",
|
||||
}
|
||||
|
||||
// Error holds LDAP error information
|
||||
type Error struct {
|
||||
// Err is the underlying error
|
||||
Err error
|
||||
// ResultCode is the LDAP error code
|
||||
ResultCode uint16
|
||||
// MatchedDN is the matchedDN returned if any
|
||||
MatchedDN string
|
||||
// Packet is the returned packet if any
|
||||
Packet *ber.Packet
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
|
||||
}
|
||||
|
||||
func (e *Error) Unwrap() error { return e.Err }
|
||||
|
||||
// GetLDAPError creates an Error out of a BER packet representing an LDAPResult.
|
||||
// The return value is an error object that can be cast to an *Error.
|
||||
// This function returns nil if resultCode in the LDAPResult sequence is success(0).
|
||||
func GetLDAPError(packet *ber.Packet) error {
|
||||
if packet == nil {
|
||||
return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty packet")}
|
||||
}
|
||||
|
||||
if len(packet.Children) >= 2 {
|
||||
response := packet.Children[1]
|
||||
if response == nil {
|
||||
return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty response in packet"), Packet: packet}
|
||||
}
|
||||
if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 {
|
||||
if ber.Type(response.Children[0].Tag) == ber.Type(ber.TagInteger) || ber.Type(response.Children[0].Tag) == ber.Type(ber.TagEnumerated) {
|
||||
resultCode := uint16(response.Children[0].Value.(int64))
|
||||
if resultCode == 0 { // No error
|
||||
return nil
|
||||
}
|
||||
|
||||
if ber.Type(response.Children[1].Tag) == ber.Type(ber.TagOctetString) &&
|
||||
ber.Type(response.Children[2].Tag) == ber.Type(ber.TagOctetString) {
|
||||
return &Error{
|
||||
ResultCode: resultCode,
|
||||
MatchedDN: response.Children[1].Value.(string),
|
||||
Err: fmt.Errorf("%s", response.Children[2].Value.(string)),
|
||||
Packet: packet,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &Error{ResultCode: ErrorNetwork, Err: fmt.Errorf("Invalid packet format"), Packet: packet}
|
||||
}
|
||||
|
||||
// NewError creates an LDAP error with the given code and underlying error
|
||||
func NewError(resultCode uint16, err error) error {
|
||||
return &Error{ResultCode: resultCode, Err: err}
|
||||
}
|
||||
|
||||
// IsErrorAnyOf returns true if the given error is an LDAP error with any one of the given result codes
|
||||
func IsErrorAnyOf(err error, codes ...uint16) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
serverError, ok := err.(*Error)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, code := range codes {
|
||||
if serverError.ResultCode == code {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsErrorWithCode returns true if the given error is an LDAP error with the given result code
|
||||
func IsErrorWithCode(err error, desiredResultCode uint16) bool {
|
||||
return IsErrorAnyOf(err, desiredResultCode)
|
||||
}
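
// Illustrative sketch, not part of the vendored library: treating "the entry
// does not exist" as a non-error when deleting. The DN and that policy are
// assumptions made for this example; IsErrorWithCode and LDAPResultNoSuchObject
// are defined in this file.
func exampleDeleteIfExists(l *Conn) error {
	err := l.Del(NewDelRequest("cn=maybe-missing,dc=example,dc=com", nil))
	if IsErrorWithCode(err, LDAPResultNoSuchObject) {
		return nil // already gone, nothing to do
	}
	return err
}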
|
||||
486
vendor/github.com/go-ldap/ldap/v3/filter.go
generated
vendored
Normal file
@@ -0,0 +1,486 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
hexpac "encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// Filter choices
|
||||
const (
|
||||
FilterAnd = 0
|
||||
FilterOr = 1
|
||||
FilterNot = 2
|
||||
FilterEqualityMatch = 3
|
||||
FilterSubstrings = 4
|
||||
FilterGreaterOrEqual = 5
|
||||
FilterLessOrEqual = 6
|
||||
FilterPresent = 7
|
||||
FilterApproxMatch = 8
|
||||
FilterExtensibleMatch = 9
|
||||
)
|
||||
|
||||
// FilterMap contains human readable descriptions of Filter choices
|
||||
var FilterMap = map[uint64]string{
|
||||
FilterAnd: "And",
|
||||
FilterOr: "Or",
|
||||
FilterNot: "Not",
|
||||
FilterEqualityMatch: "Equality Match",
|
||||
FilterSubstrings: "Substrings",
|
||||
FilterGreaterOrEqual: "Greater Or Equal",
|
||||
FilterLessOrEqual: "Less Or Equal",
|
||||
FilterPresent: "Present",
|
||||
FilterApproxMatch: "Approx Match",
|
||||
FilterExtensibleMatch: "Extensible Match",
|
||||
}
|
||||
|
||||
// SubstringFilter options
|
||||
const (
|
||||
FilterSubstringsInitial = 0
|
||||
FilterSubstringsAny = 1
|
||||
FilterSubstringsFinal = 2
|
||||
)
|
||||
|
||||
// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices
|
||||
var FilterSubstringsMap = map[uint64]string{
|
||||
FilterSubstringsInitial: "Substrings Initial",
|
||||
FilterSubstringsAny: "Substrings Any",
|
||||
FilterSubstringsFinal: "Substrings Final",
|
||||
}
|
||||
|
||||
// MatchingRuleAssertion choices
|
||||
const (
|
||||
MatchingRuleAssertionMatchingRule = 1
|
||||
MatchingRuleAssertionType = 2
|
||||
MatchingRuleAssertionMatchValue = 3
|
||||
MatchingRuleAssertionDNAttributes = 4
|
||||
)
|
||||
|
||||
// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices
|
||||
var MatchingRuleAssertionMap = map[uint64]string{
|
||||
MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule",
|
||||
MatchingRuleAssertionType: "Matching Rule Assertion Type",
|
||||
MatchingRuleAssertionMatchValue: "Matching Rule Assertion Match Value",
|
||||
MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes",
|
||||
}
|
||||
|
||||
var _SymbolAny = []byte{'*'}
|
||||
|
||||
// CompileFilter converts a string representation of a filter into a BER-encoded packet
|
||||
func CompileFilter(filter string) (*ber.Packet, error) {
|
||||
if len(filter) == 0 || filter[0] != '(' {
|
||||
return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('"))
|
||||
}
|
||||
packet, pos, err := compileFilter(filter, 1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch {
|
||||
case pos > len(filter):
|
||||
return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
|
||||
case pos < len(filter):
|
||||
return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:])))
|
||||
}
|
||||
return packet, nil
|
||||
}
|
||||
|
||||
// DecompileFilter converts a packet representation of a filter into a string representation
|
||||
func DecompileFilter(packet *ber.Packet) (_ string, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter"))
|
||||
}
|
||||
}()
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
buf.WriteByte('(')
|
||||
childStr := ""
|
||||
|
||||
switch packet.Tag {
|
||||
case FilterAnd:
|
||||
buf.WriteByte('&')
|
||||
for _, child := range packet.Children {
|
||||
childStr, err = DecompileFilter(child)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
buf.WriteString(childStr)
|
||||
}
|
||||
case FilterOr:
|
||||
buf.WriteByte('|')
|
||||
for _, child := range packet.Children {
|
||||
childStr, err = DecompileFilter(child)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
buf.WriteString(childStr)
|
||||
}
|
||||
case FilterNot:
|
||||
buf.WriteByte('!')
|
||||
childStr, err = DecompileFilter(packet.Children[0])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
buf.WriteString(childStr)
|
||||
|
||||
case FilterSubstrings:
|
||||
buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
|
||||
buf.WriteByte('=')
|
||||
for i, child := range packet.Children[1].Children {
|
||||
if i == 0 && child.Tag != FilterSubstringsInitial {
|
||||
buf.Write(_SymbolAny)
|
||||
}
|
||||
buf.WriteString(EscapeFilter(ber.DecodeString(child.Data.Bytes())))
|
||||
if child.Tag != FilterSubstringsFinal {
|
||||
buf.Write(_SymbolAny)
|
||||
}
|
||||
}
|
||||
case FilterEqualityMatch:
|
||||
buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
|
||||
buf.WriteByte('=')
|
||||
buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())))
|
||||
case FilterGreaterOrEqual:
|
||||
buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
|
||||
buf.WriteString(">=")
|
||||
buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())))
|
||||
case FilterLessOrEqual:
|
||||
buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
|
||||
buf.WriteString("<=")
|
||||
buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())))
|
||||
case FilterPresent:
|
||||
buf.WriteString(ber.DecodeString(packet.Data.Bytes()))
|
||||
buf.WriteString("=*")
|
||||
case FilterApproxMatch:
|
||||
buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
|
||||
buf.WriteString("~=")
|
||||
buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())))
|
||||
case FilterExtensibleMatch:
|
||||
attr := ""
|
||||
dnAttributes := false
|
||||
matchingRule := ""
|
||||
value := ""
|
||||
|
||||
for _, child := range packet.Children {
|
||||
switch child.Tag {
|
||||
case MatchingRuleAssertionMatchingRule:
|
||||
matchingRule = ber.DecodeString(child.Data.Bytes())
|
||||
case MatchingRuleAssertionType:
|
||||
attr = ber.DecodeString(child.Data.Bytes())
|
||||
case MatchingRuleAssertionMatchValue:
|
||||
value = ber.DecodeString(child.Data.Bytes())
|
||||
case MatchingRuleAssertionDNAttributes:
|
||||
dnAttributes = child.Value.(bool)
|
||||
}
|
||||
}
|
||||
|
||||
if len(attr) > 0 {
|
||||
buf.WriteString(attr)
|
||||
}
|
||||
if dnAttributes {
|
||||
buf.WriteString(":dn")
|
||||
}
|
||||
if len(matchingRule) > 0 {
|
||||
buf.WriteString(":")
|
||||
buf.WriteString(matchingRule)
|
||||
}
|
||||
buf.WriteString(":=")
|
||||
buf.WriteString(EscapeFilter(value))
|
||||
}
|
||||
|
||||
buf.WriteByte(')')
|
||||
|
||||
return buf.String(), nil
|
||||
}
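
// Illustrative sketch, not part of the vendored library: round-tripping a
// search filter through CompileFilter and DecompileFilter. The filter string
// is an assumption made for this example.
func exampleFilterRoundTrip() (string, error) {
	packet, err := CompileFilter("(&(objectClass=person)(uid=jdoe*))")
	if err != nil {
		return "", err
	}
	// DecompileFilter rebuilds an equivalent string representation from the
	// BER packet, which is handy for logging.
	return DecompileFilter(packet)
}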
|
||||
|
||||
func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) {
|
||||
for pos < len(filter) && filter[pos] == '(' {
|
||||
child, newPos, err := compileFilter(filter, pos+1)
|
||||
if err != nil {
|
||||
return pos, err
|
||||
}
|
||||
pos = newPos
|
||||
parent.AppendChild(child)
|
||||
}
|
||||
if pos == len(filter) {
|
||||
return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
|
||||
}
|
||||
|
||||
return pos + 1, nil
|
||||
}
|
||||
|
||||
func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
|
||||
var (
|
||||
packet *ber.Packet
|
||||
err error
|
||||
)
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter"))
|
||||
}
|
||||
}()
|
||||
newPos := pos
|
||||
|
||||
currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:])
|
||||
|
||||
switch currentRune {
|
||||
case utf8.RuneError:
|
||||
return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
|
||||
case '(':
|
||||
packet, newPos, err = compileFilter(filter, pos+currentWidth)
|
||||
newPos++
|
||||
return packet, newPos, err
|
||||
case '&':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd])
|
||||
newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
|
||||
return packet, newPos, err
|
||||
case '|':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr])
|
||||
newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
|
||||
return packet, newPos, err
|
||||
case '!':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot])
|
||||
var child *ber.Packet
|
||||
child, newPos, err = compileFilter(filter, pos+currentWidth)
|
||||
packet.AppendChild(child)
|
||||
return packet, newPos, err
|
||||
default:
|
||||
const (
|
||||
stateReadingAttr = 0
|
||||
stateReadingExtensibleMatchingRule = 1
|
||||
stateReadingCondition = 2
|
||||
)
|
||||
|
||||
state := stateReadingAttr
|
||||
attribute := bytes.NewBuffer(nil)
|
||||
extensibleDNAttributes := false
|
||||
extensibleMatchingRule := bytes.NewBuffer(nil)
|
||||
condition := bytes.NewBuffer(nil)
|
||||
|
||||
for newPos < len(filter) {
|
||||
remainingFilter := filter[newPos:]
|
||||
currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter)
|
||||
if currentRune == ')' {
|
||||
break
|
||||
}
|
||||
if currentRune == utf8.RuneError {
|
||||
return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
|
||||
}
|
||||
|
||||
switch state {
|
||||
case stateReadingAttr:
|
||||
switch {
|
||||
// Extensible rule, with only DN-matching
|
||||
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
|
||||
extensibleDNAttributes = true
|
||||
state = stateReadingCondition
|
||||
newPos += 5
|
||||
|
||||
// Extensible rule, with DN-matching and a matching OID
|
||||
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
|
||||
extensibleDNAttributes = true
|
||||
state = stateReadingExtensibleMatchingRule
|
||||
newPos += 4
|
||||
|
||||
// Extensible rule, with attr only
|
||||
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Extensible rule, with no DN attribute matching
|
||||
case currentRune == ':':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
|
||||
state = stateReadingExtensibleMatchingRule
|
||||
newPos++
|
||||
|
||||
// Equality condition
|
||||
case currentRune == '=':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch])
|
||||
state = stateReadingCondition
|
||||
newPos++
|
||||
|
||||
// Greater-than or equal
|
||||
case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual])
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Less-than or equal
|
||||
case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual])
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Approx
|
||||
case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch])
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Still reading the attribute name
|
||||
default:
|
||||
attribute.WriteRune(currentRune)
|
||||
newPos += currentWidth
|
||||
}
|
||||
|
||||
case stateReadingExtensibleMatchingRule:
|
||||
switch {
|
||||
|
||||
// Matching rule OID is done
|
||||
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Still reading the matching rule oid
|
||||
default:
|
||||
extensibleMatchingRule.WriteRune(currentRune)
|
||||
newPos += currentWidth
|
||||
}
|
||||
|
||||
case stateReadingCondition:
|
||||
// append to the condition
|
||||
condition.WriteRune(currentRune)
|
||||
newPos += currentWidth
|
||||
}
|
||||
}
|
||||
|
||||
if newPos == len(filter) {
|
||||
err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
|
||||
return packet, newPos, err
|
||||
}
|
||||
if packet == nil {
|
||||
err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter"))
|
||||
return packet, newPos, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case packet.Tag == FilterExtensibleMatch:
|
||||
// MatchingRuleAssertion ::= SEQUENCE {
|
||||
// matchingRule [1] MatchingRuleID OPTIONAL,
|
||||
// type [2] AttributeDescription OPTIONAL,
|
||||
// matchValue [3] AssertionValue,
|
||||
// dnAttributes [4] BOOLEAN DEFAULT FALSE
|
||||
// }
|
||||
|
||||
// Include the matching rule oid, if specified
|
||||
if extensibleMatchingRule.Len() > 0 {
|
||||
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule.String(), MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule]))
|
||||
}
|
||||
|
||||
// Include the attribute, if specified
|
||||
if attribute.Len() > 0 {
|
||||
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute.String(), MatchingRuleAssertionMap[MatchingRuleAssertionType]))
|
||||
}
|
||||
|
||||
// Add the value (only required child)
|
||||
encodedString, encodeErr := decodeEscapedSymbols(condition.Bytes())
|
||||
if encodeErr != nil {
|
||||
return packet, newPos, encodeErr
|
||||
}
|
||||
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue]))
|
||||
|
||||
// Defaults to false, so only include in the sequence if true
|
||||
if extensibleDNAttributes {
|
||||
packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes]))
|
||||
}
|
||||
|
||||
case packet.Tag == FilterEqualityMatch && bytes.Equal(condition.Bytes(), _SymbolAny):
|
||||
packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute.String(), FilterMap[FilterPresent])
|
||||
case packet.Tag == FilterEqualityMatch && bytes.Contains(condition.Bytes(), _SymbolAny):
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute.String(), "Attribute"))
|
||||
packet.Tag = FilterSubstrings
|
||||
packet.Description = FilterMap[uint64(packet.Tag)]
|
||||
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings")
|
||||
parts := bytes.Split(condition.Bytes(), _SymbolAny)
|
||||
for i, part := range parts {
|
||||
if len(part) == 0 {
|
||||
continue
|
||||
}
|
||||
var tag ber.Tag
|
||||
switch i {
|
||||
case 0:
|
||||
tag = FilterSubstringsInitial
|
||||
case len(parts) - 1:
|
||||
tag = FilterSubstringsFinal
|
||||
default:
|
||||
tag = FilterSubstringsAny
|
||||
}
|
||||
encodedString, encodeErr := decodeEscapedSymbols(part)
|
||||
if encodeErr != nil {
|
||||
return packet, newPos, encodeErr
|
||||
}
|
||||
seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)]))
|
||||
}
|
||||
packet.AppendChild(seq)
|
||||
default:
|
||||
encodedString, encodeErr := decodeEscapedSymbols(condition.Bytes())
|
||||
if encodeErr != nil {
|
||||
return packet, newPos, encodeErr
|
||||
}
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute.String(), "Attribute"))
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition"))
|
||||
}
|
||||
|
||||
newPos += currentWidth
|
||||
return packet, newPos, err
|
||||
}
|
||||
}
|
||||
|
||||
// Convert from "ABC\xx\xx\xx" form to literal bytes for transport
|
||||
func decodeEscapedSymbols(src []byte) (string, error) {
|
||||
var (
|
||||
buffer bytes.Buffer
|
||||
offset int
|
||||
reader = bytes.NewReader(src)
|
||||
byteHex []byte
|
||||
byteVal []byte
|
||||
)
|
||||
|
||||
for {
|
||||
runeVal, runeSize, err := reader.ReadRune()
|
||||
if err == io.EOF {
|
||||
return buffer.String(), nil
|
||||
} else if err != nil {
|
||||
return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: failed to read filter: %v", err))
|
||||
} else if runeVal == unicode.ReplacementChar {
|
||||
return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", offset))
|
||||
}
|
||||
|
||||
if runeVal == '\\' {
|
||||
// http://tools.ietf.org/search/rfc4515
|
||||
// \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not
|
||||
// being a member of UTF1SUBSET.
|
||||
if byteHex == nil {
|
||||
byteHex = make([]byte, 2)
|
||||
byteVal = make([]byte, 1)
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(reader, byteHex); err != nil {
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter"))
|
||||
}
|
||||
return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: invalid characters for escape in filter: %v", err))
|
||||
}
|
||||
|
||||
if _, err := hexpac.Decode(byteVal, byteHex); err != nil {
|
||||
return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: invalid characters for escape in filter: %v", err))
|
||||
}
|
||||
|
||||
buffer.Write(byteVal)
|
||||
} else {
|
||||
buffer.WriteRune(runeVal)
|
||||
}
|
||||
|
||||
offset += runeSize
|
||||
}
|
||||
}
|
||||
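The `\xx` form that decodeEscapedSymbols undoes is the RFC 4515 hex escaping that EscapeFilter (in ldap.go, further below) produces for special filter characters. A minimal, self-contained sketch of that round trip, assuming the vendored package is imported as `ldap`; the decode helper itself is unexported, so the reverse step is spelled out inline:

```
package main

import (
	"encoding/hex"
	"fmt"
	"strings"

	ldap "github.com/go-ldap/ldap/v3"
)

func main() {
	// EscapeFilter turns each special byte into the "\xx" hex form used
	// inside filter strings.
	escaped := ldap.EscapeFilter("a*(b)c")
	fmt.Println(escaped) // a\2a\28b\29c

	// Reverse of decodeEscapedSymbols for this plain-ASCII case: replace
	// every "\xx" pair with the byte it encodes.
	var out strings.Builder
	for i := 0; i < len(escaped); i++ {
		if escaped[i] == '\\' && i+2 < len(escaped) {
			if b, err := hex.DecodeString(escaped[i+1 : i+3]); err == nil {
				out.Write(b)
				i += 2
				continue
			}
		}
		out.WriteByte(escaped[i])
	}
	fmt.Println(out.String()) // a*(b)c
}
```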
390
vendor/github.com/go-ldap/ldap/v3/ldap.go
generated
vendored
Normal file
@@ -0,0 +1,390 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// LDAP Application Codes
|
||||
const (
|
||||
ApplicationBindRequest = 0
|
||||
ApplicationBindResponse = 1
|
||||
ApplicationUnbindRequest = 2
|
||||
ApplicationSearchRequest = 3
|
||||
ApplicationSearchResultEntry = 4
|
||||
ApplicationSearchResultDone = 5
|
||||
ApplicationModifyRequest = 6
|
||||
ApplicationModifyResponse = 7
|
||||
ApplicationAddRequest = 8
|
||||
ApplicationAddResponse = 9
|
||||
ApplicationDelRequest = 10
|
||||
ApplicationDelResponse = 11
|
||||
ApplicationModifyDNRequest = 12
|
||||
ApplicationModifyDNResponse = 13
|
||||
ApplicationCompareRequest = 14
|
||||
ApplicationCompareResponse = 15
|
||||
ApplicationAbandonRequest = 16
|
||||
ApplicationSearchResultReference = 19
|
||||
ApplicationExtendedRequest = 23
|
||||
ApplicationExtendedResponse = 24
|
||||
ApplicationIntermediateResponse = 25
|
||||
)
|
||||
|
||||
// ApplicationMap contains human readable descriptions of LDAP Application Codes
|
||||
var ApplicationMap = map[uint8]string{
|
||||
ApplicationBindRequest: "Bind Request",
|
||||
ApplicationBindResponse: "Bind Response",
|
||||
ApplicationUnbindRequest: "Unbind Request",
|
||||
ApplicationSearchRequest: "Search Request",
|
||||
ApplicationSearchResultEntry: "Search Result Entry",
|
||||
ApplicationSearchResultDone: "Search Result Done",
|
||||
ApplicationModifyRequest: "Modify Request",
|
||||
ApplicationModifyResponse: "Modify Response",
|
||||
ApplicationAddRequest: "Add Request",
|
||||
ApplicationAddResponse: "Add Response",
|
||||
ApplicationDelRequest: "Del Request",
|
||||
ApplicationDelResponse: "Del Response",
|
||||
ApplicationModifyDNRequest: "Modify DN Request",
|
||||
ApplicationModifyDNResponse: "Modify DN Response",
|
||||
ApplicationCompareRequest: "Compare Request",
|
||||
ApplicationCompareResponse: "Compare Response",
|
||||
ApplicationAbandonRequest: "Abandon Request",
|
||||
ApplicationSearchResultReference: "Search Result Reference",
|
||||
ApplicationExtendedRequest: "Extended Request",
|
||||
ApplicationExtendedResponse: "Extended Response",
|
||||
ApplicationIntermediateResponse: "Intermediate Response",
|
||||
}
|
||||
|
||||
// LDAP Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10)
|
||||
const (
|
||||
BeheraPasswordExpired = 0
|
||||
BeheraAccountLocked = 1
|
||||
BeheraChangeAfterReset = 2
|
||||
BeheraPasswordModNotAllowed = 3
|
||||
BeheraMustSupplyOldPassword = 4
|
||||
BeheraInsufficientPasswordQuality = 5
|
||||
BeheraPasswordTooShort = 6
|
||||
BeheraPasswordTooYoung = 7
|
||||
BeheraPasswordInHistory = 8
|
||||
)
|
||||
|
||||
// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes
|
||||
var BeheraPasswordPolicyErrorMap = map[int8]string{
|
||||
BeheraPasswordExpired: "Password expired",
|
||||
BeheraAccountLocked: "Account locked",
|
||||
BeheraChangeAfterReset: "Password must be changed",
|
||||
BeheraPasswordModNotAllowed: "Policy prevents password modification",
|
||||
BeheraMustSupplyOldPassword: "Policy requires old password in order to change password",
|
||||
BeheraInsufficientPasswordQuality: "Password fails quality checks",
|
||||
BeheraPasswordTooShort: "Password is too short for policy",
|
||||
BeheraPasswordTooYoung: "Password has been changed too recently",
|
||||
BeheraPasswordInHistory: "New password is in list of old passwords",
|
||||
}
|
||||
|
||||
var logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
|
||||
// Logger allows clients to override the default logger
|
||||
func Logger(l *log.Logger) {
|
||||
logger = l
|
||||
}
|
||||
|
||||
// Adds descriptions to an LDAP Response packet for debugging
|
||||
func addLDAPDescriptions(packet *ber.Packet) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = NewError(ErrorDebugging, fmt.Errorf("ldap: cannot process packet to add descriptions: %s", r))
|
||||
}
|
||||
}()
|
||||
packet.Description = "LDAP Response"
|
||||
packet.Children[0].Description = "Message ID"
|
||||
|
||||
application := uint8(packet.Children[1].Tag)
|
||||
packet.Children[1].Description = ApplicationMap[application]
|
||||
|
||||
switch application {
|
||||
case ApplicationBindRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationBindResponse:
|
||||
err = addDefaultLDAPResponseDescriptions(packet)
|
||||
case ApplicationUnbindRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationSearchRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationSearchResultEntry:
|
||||
packet.Children[1].Children[0].Description = "Object Name"
|
||||
packet.Children[1].Children[1].Description = "Attributes"
|
||||
for _, child := range packet.Children[1].Children[1].Children {
|
||||
child.Description = "Attribute"
|
||||
child.Children[0].Description = "Attribute Name"
|
||||
child.Children[1].Description = "Attribute Values"
|
||||
for _, grandchild := range child.Children[1].Children {
|
||||
grandchild.Description = "Attribute Value"
|
||||
}
|
||||
}
|
||||
if len(packet.Children) == 3 {
|
||||
err = addControlDescriptions(packet.Children[2])
|
||||
}
|
||||
case ApplicationSearchResultDone:
|
||||
err = addDefaultLDAPResponseDescriptions(packet)
|
||||
case ApplicationModifyRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationModifyResponse:
|
||||
case ApplicationAddRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationAddResponse:
|
||||
case ApplicationDelRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationDelResponse:
|
||||
case ApplicationModifyDNRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationModifyDNResponse:
|
||||
case ApplicationCompareRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationCompareResponse:
|
||||
case ApplicationAbandonRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationSearchResultReference:
|
||||
case ApplicationExtendedRequest:
|
||||
err = addRequestDescriptions(packet)
|
||||
case ApplicationExtendedResponse:
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func addControlDescriptions(packet *ber.Packet) error {
|
||||
packet.Description = "Controls"
|
||||
for _, child := range packet.Children {
|
||||
var value *ber.Packet
|
||||
controlType := ""
|
||||
child.Description = "Control"
|
||||
switch len(child.Children) {
|
||||
case 0:
|
||||
// at least one child is required for control type
|
||||
return fmt.Errorf("at least one child is required for control type")
|
||||
|
||||
case 1:
|
||||
// just type, no criticality or value
|
||||
controlType = child.Children[0].Value.(string)
|
||||
child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
|
||||
|
||||
case 2:
|
||||
controlType = child.Children[0].Value.(string)
|
||||
child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
|
||||
// Children[1] could be criticality or value (both are optional)
|
||||
// duck-type on whether this is a boolean
|
||||
if _, ok := child.Children[1].Value.(bool); ok {
|
||||
child.Children[1].Description = "Criticality"
|
||||
} else {
|
||||
child.Children[1].Description = "Control Value"
|
||||
value = child.Children[1]
|
||||
}
|
||||
|
||||
case 3:
|
||||
// criticality and value present
|
||||
controlType = child.Children[0].Value.(string)
|
||||
child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
|
||||
child.Children[1].Description = "Criticality"
|
||||
child.Children[2].Description = "Control Value"
|
||||
value = child.Children[2]
|
||||
|
||||
default:
|
||||
// more than 3 children is invalid
|
||||
return fmt.Errorf("more than 3 children for control packet found")
|
||||
}
|
||||
|
||||
if value == nil {
|
||||
continue
|
||||
}
|
||||
switch controlType {
|
||||
case ControlTypePaging:
|
||||
value.Description += " (Paging)"
|
||||
if value.Value != nil {
|
||||
valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode data bytes: %s", err)
|
||||
}
|
||||
value.Data.Truncate(0)
|
||||
value.Value = nil
|
||||
valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes()
|
||||
value.AppendChild(valueChildren)
|
||||
}
|
||||
value.Children[0].Description = "Real Search Control Value"
|
||||
value.Children[0].Children[0].Description = "Paging Size"
|
||||
value.Children[0].Children[1].Description = "Cookie"
|
||||
|
||||
case ControlTypeBeheraPasswordPolicy:
|
||||
value.Description += " (Password Policy - Behera Draft)"
|
||||
if value.Value != nil {
|
||||
valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode data bytes: %s", err)
|
||||
}
|
||||
value.Data.Truncate(0)
|
||||
value.Value = nil
|
||||
value.AppendChild(valueChildren)
|
||||
}
|
||||
sequence := value.Children[0]
|
||||
for _, child := range sequence.Children {
|
||||
if child.Tag == 0 {
|
||||
// Warning
|
||||
warningPacket := child.Children[0]
|
||||
val, err := ber.ParseInt64(warningPacket.Data.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode data bytes: %s", err)
|
||||
}
|
||||
if warningPacket.Tag == 0 {
|
||||
// timeBeforeExpiration
|
||||
value.Description += " (TimeBeforeExpiration)"
|
||||
warningPacket.Value = val
|
||||
} else if warningPacket.Tag == 1 {
|
||||
// graceAuthNsRemaining
|
||||
value.Description += " (GraceAuthNsRemaining)"
|
||||
warningPacket.Value = val
|
||||
}
|
||||
} else if child.Tag == 1 {
|
||||
// Error
|
||||
bs := child.Data.Bytes()
|
||||
if len(bs) != 1 || bs[0] > 8 {
|
||||
return fmt.Errorf("failed to decode data bytes: %s", "invalid PasswordPolicyResponse enum value")
|
||||
}
|
||||
val := int8(bs[0])
|
||||
child.Description = "Error"
|
||||
child.Value = val
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func addRequestDescriptions(packet *ber.Packet) error {
|
||||
packet.Description = "LDAP Request"
|
||||
packet.Children[0].Description = "Message ID"
|
||||
packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)]
|
||||
if len(packet.Children) == 3 {
|
||||
return addControlDescriptions(packet.Children[2])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func addDefaultLDAPResponseDescriptions(packet *ber.Packet) error {
|
||||
resultCode := uint16(LDAPResultSuccess)
|
||||
matchedDN := ""
|
||||
description := "Success"
|
||||
if err := GetLDAPError(packet); err != nil {
|
||||
resultCode = err.(*Error).ResultCode
|
||||
matchedDN = err.(*Error).MatchedDN
|
||||
description = "Error Message"
|
||||
}
|
||||
|
||||
packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[resultCode] + ")"
|
||||
packet.Children[1].Children[1].Description = "Matched DN (" + matchedDN + ")"
|
||||
packet.Children[1].Children[2].Description = description
|
||||
if len(packet.Children[1].Children) > 3 {
|
||||
packet.Children[1].Children[3].Description = "Referral"
|
||||
}
|
||||
if len(packet.Children) == 3 {
|
||||
return addControlDescriptions(packet.Children[2])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DebugBinaryFile reads and prints packets from the given filename
|
||||
func DebugBinaryFile(fileName string) error {
|
||||
file, err := ioutil.ReadFile(fileName)
|
||||
if err != nil {
|
||||
return NewError(ErrorDebugging, err)
|
||||
}
|
||||
ber.PrintBytes(os.Stdout, file, "")
|
||||
packet, err := ber.DecodePacketErr(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode packet: %s", err)
|
||||
}
|
||||
if err := addLDAPDescriptions(packet); err != nil {
|
||||
return err
|
||||
}
|
||||
ber.PrintPacket(packet)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var hex = "0123456789abcdef"
|
||||
|
||||
func mustEscape(c byte) bool {
|
||||
return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0
|
||||
}
|
||||
|
||||
// EscapeFilter escapes the special characters in the provided LDAP filter
|
||||
// string: the characters in the set `()*\` and those out of the range 0 < c < 0x80,
|
||||
// as defined in RFC 4515.
|
||||
func EscapeFilter(filter string) string {
|
||||
escape := 0
|
||||
for i := 0; i < len(filter); i++ {
|
||||
if mustEscape(filter[i]) {
|
||||
escape++
|
||||
}
|
||||
}
|
||||
if escape == 0 {
|
||||
return filter
|
||||
}
|
||||
buf := make([]byte, len(filter)+escape*2)
|
||||
for i, j := 0, 0; i < len(filter); i++ {
|
||||
c := filter[i]
|
||||
if mustEscape(c) {
|
||||
buf[j+0] = '\\'
|
||||
buf[j+1] = hex[c>>4]
|
||||
buf[j+2] = hex[c&0xf]
|
||||
j += 3
|
||||
} else {
|
||||
buf[j] = c
|
||||
j++
|
||||
}
|
||||
}
|
||||
return string(buf)
|
||||
}
|
||||
|
||||
// EscapeDN escapes distinguished names as described in RFC4514. Characters in the
|
||||
// set `"+,;<>\` are escaped by prepending a backslash, which is also done for leading
|
||||
// and trailing spaces and for a leading `#`. Null bytes are replaced with `\00`.
|
||||
func EscapeDN(dn string) string {
|
||||
if dn == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
builder := strings.Builder{}
|
||||
|
||||
for i, r := range dn {
|
||||
// Escape leading and trailing spaces
|
||||
if (i == 0 || i == len(dn)-1) && r == ' ' {
|
||||
builder.WriteRune('\\')
|
||||
builder.WriteRune(r)
|
||||
continue
|
||||
}
|
||||
|
||||
// Escape leading '#'
|
||||
if i == 0 && r == '#' {
|
||||
builder.WriteRune('\\')
|
||||
builder.WriteRune(r)
|
||||
continue
|
||||
}
|
||||
|
||||
// Escape characters as defined in RFC4514
|
||||
switch r {
|
||||
case '"', '+', ',', ';', '<', '>', '\\':
|
||||
builder.WriteRune('\\')
|
||||
builder.WriteRune(r)
|
||||
case '\x00': // Null byte may not be escaped by a leading backslash
|
||||
builder.WriteString("\\00")
|
||||
default:
|
||||
builder.WriteRune(r)
|
||||
}
|
||||
}
|
||||
|
||||
return builder.String()
|
||||
}
|
||||
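EscapeFilter and EscapeDN above are the two entry points callers use before splicing untrusted input into a filter or a distinguished name. A short sketch, assuming the vendored package is imported as `ldap`:

```
package main

import (
	"fmt"

	ldap "github.com/go-ldap/ldap/v3"
)

func main() {
	// Unescaped, this input would change the meaning of the filter.
	user := "smith*)(objectClass=*"

	filter := fmt.Sprintf("(uid=%s)", ldap.EscapeFilter(user))
	fmt.Println(filter) // (uid=smith\2a\29\28objectClass=\2a)

	// EscapeDN handles the RFC 4514 set plus leading/trailing spaces,
	// a leading '#', and null bytes.
	dn := fmt.Sprintf("uid=%s,ou=people,dc=example,dc=org", ldap.EscapeDN("  jones, jr "))
	fmt.Println(dn)
}
```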
102
vendor/github.com/go-ldap/ldap/v3/moddn.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// ModifyDNRequest holds the request to modify a DN
|
||||
type ModifyDNRequest struct {
|
||||
DN string
|
||||
NewRDN string
|
||||
DeleteOldRDN bool
|
||||
NewSuperior string
|
||||
// Controls hold optional controls to send with the request
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
// NewModifyDNRequest creates a new request which can be passed to ModifyDN().
|
||||
//
|
||||
// To move an object in the tree, set the "newSup" to the new parent entry DN. Use an
|
||||
// empty string for just changing the object's RDN.
|
||||
//
|
||||
// For moving the object without renaming, the "rdn" must be the first
|
||||
// RDN of the given DN.
|
||||
//
|
||||
// A call like
|
||||
//
|
||||
// mdnReq := NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
|
||||
//
|
||||
// will set up the request to just rename uid=someone,dc=example,dc=org to
|
||||
// uid=newname,dc=example,dc=org.
|
||||
func NewModifyDNRequest(dn string, rdn string, delOld bool, newSup string) *ModifyDNRequest {
|
||||
return &ModifyDNRequest{
|
||||
DN: dn,
|
||||
NewRDN: rdn,
|
||||
DeleteOldRDN: delOld,
|
||||
NewSuperior: newSup,
|
||||
}
|
||||
}
|
||||
|
||||
// NewModifyDNWithControlsRequest creates a new request which can be passed to ModifyDN()
|
||||
// and also allows setting LDAP request controls.
|
||||
//
|
||||
// Refer to NewModifyDNRequest for the other parameters.
|
||||
func NewModifyDNWithControlsRequest(dn string, rdn string, delOld bool,
|
||||
newSup string, controls []Control) *ModifyDNRequest {
|
||||
return &ModifyDNRequest{
|
||||
DN: dn,
|
||||
NewRDN: rdn,
|
||||
DeleteOldRDN: delOld,
|
||||
NewSuperior: newSup,
|
||||
Controls: controls,
|
||||
}
|
||||
}
|
||||
|
||||
func (req *ModifyDNRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyDNRequest, nil, "Modify DN Request")
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.NewRDN, "New RDN"))
|
||||
if req.DeleteOldRDN {
|
||||
buf := []byte{0xff}
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, string(buf), "Delete old RDN"))
|
||||
} else {
|
||||
pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.DeleteOldRDN, "Delete old RDN"))
|
||||
}
|
||||
if req.NewSuperior != "" {
|
||||
pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.NewSuperior, "New Superior"))
|
||||
}
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
if len(req.Controls) > 0 {
|
||||
envelope.AppendChild(encodeControls(req.Controls))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ModifyDN renames the given DN and optionally move to another base (when the "newSup" argument
|
||||
// to NewModifyDNRequest() is not "").
|
||||
func (l *Conn) ModifyDN(m *ModifyDNRequest) error {
|
||||
msgCtx, err := l.doRequest(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationModifyDNResponse {
|
||||
err := GetLDAPError(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("ldap: unexpected response: %d", packet.Children[1].Tag)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
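Tying the above together, a minimal rename sketch. It assumes an already established and bound *ldap.Conn supplied by the caller (connection setup is outside this file) and reuses the example DN from the NewModifyDNRequest comment:

```
package example

import (
	ldap "github.com/go-ldap/ldap/v3"
)

// renameEntry renames uid=someone,dc=example,dc=org to
// uid=newname,dc=example,dc=org, deleting the old RDN and keeping the same
// parent (an empty newSup means no move).
func renameEntry(l *ldap.Conn) error {
	req := ldap.NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
	return l.ModifyDN(req)
}
```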
181
vendor/github.com/go-ldap/ldap/v3/modify.go
generated
vendored
Normal file
@@ -0,0 +1,181 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// Change operation choices
|
||||
const (
|
||||
AddAttribute = 0
|
||||
DeleteAttribute = 1
|
||||
ReplaceAttribute = 2
|
||||
IncrementAttribute = 3 // (https://tools.ietf.org/html/rfc4525)
|
||||
)
|
||||
|
||||
// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
|
||||
type PartialAttribute struct {
|
||||
// Type is the type of the partial attribute
|
||||
Type string
|
||||
// Vals are the values of the partial attribute
|
||||
Vals []string
|
||||
}
|
||||
|
||||
func (p *PartialAttribute) encode() *ber.Packet {
|
||||
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute")
|
||||
seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type"))
|
||||
set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
|
||||
for _, value := range p.Vals {
|
||||
set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
|
||||
}
|
||||
seq.AppendChild(set)
|
||||
return seq
|
||||
}
|
||||
|
||||
// Change for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
|
||||
type Change struct {
|
||||
// Operation is the type of change to be made
|
||||
Operation uint
|
||||
// Modification is the attribute to be modified
|
||||
Modification PartialAttribute
|
||||
}
|
||||
|
||||
func (c *Change) encode() *ber.Packet {
|
||||
change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
|
||||
change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(c.Operation), "Operation"))
|
||||
change.AppendChild(c.Modification.encode())
|
||||
return change
|
||||
}
|
||||
|
||||
// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
|
||||
type ModifyRequest struct {
|
||||
// DN is the distinguishedName of the directory entry to modify
|
||||
DN string
|
||||
// Changes contain the attributes to modify
|
||||
Changes []Change
|
||||
// Controls hold optional controls to send with the request
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
// Add appends the given attribute to the list of changes to be made
|
||||
func (req *ModifyRequest) Add(attrType string, attrVals []string) {
|
||||
req.appendChange(AddAttribute, attrType, attrVals)
|
||||
}
|
||||
|
||||
// Delete appends the given attribute to the list of changes to be made
|
||||
func (req *ModifyRequest) Delete(attrType string, attrVals []string) {
|
||||
req.appendChange(DeleteAttribute, attrType, attrVals)
|
||||
}
|
||||
|
||||
// Replace appends the given attribute to the list of changes to be made
|
||||
func (req *ModifyRequest) Replace(attrType string, attrVals []string) {
|
||||
req.appendChange(ReplaceAttribute, attrType, attrVals)
|
||||
}
|
||||
|
||||
// Increment appends the given attribute to the list of changes to be made
|
||||
func (req *ModifyRequest) Increment(attrType string, attrVal string) {
|
||||
req.appendChange(IncrementAttribute, attrType, []string{attrVal})
|
||||
}
|
||||
|
||||
func (req *ModifyRequest) appendChange(operation uint, attrType string, attrVals []string) {
|
||||
req.Changes = append(req.Changes, Change{operation, PartialAttribute{Type: attrType, Vals: attrVals}})
|
||||
}
|
||||
|
||||
func (req *ModifyRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request")
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
|
||||
changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes")
|
||||
for _, change := range req.Changes {
|
||||
changes.AppendChild(change.encode())
|
||||
}
|
||||
pkt.AppendChild(changes)
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
if len(req.Controls) > 0 {
|
||||
envelope.AppendChild(encodeControls(req.Controls))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewModifyRequest creates a modify request for the given DN
|
||||
func NewModifyRequest(dn string, controls []Control) *ModifyRequest {
|
||||
return &ModifyRequest{
|
||||
DN: dn,
|
||||
Controls: controls,
|
||||
}
|
||||
}
|
||||
|
||||
// Modify performs the ModifyRequest
|
||||
func (l *Conn) Modify(modifyRequest *ModifyRequest) error {
|
||||
msgCtx, err := l.doRequest(modifyRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationModifyResponse {
|
||||
err := GetLDAPError(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("ldap: unexpected response: %d", packet.Children[1].Tag)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ModifyResult holds the server's response to a modify request
|
||||
type ModifyResult struct {
|
||||
// Controls are the returned controls
|
||||
Controls []Control
|
||||
// Referral is the returned referral
|
||||
Referral string
|
||||
}
|
||||
|
||||
// ModifyWithResult performs the ModifyRequest and returns the result
|
||||
func (l *Conn) ModifyWithResult(modifyRequest *ModifyRequest) (*ModifyResult, error) {
|
||||
msgCtx, err := l.doRequest(modifyRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
result := &ModifyResult{
|
||||
Controls: make([]Control, 0),
|
||||
}
|
||||
|
||||
l.Debug.Printf("%d: waiting for response", msgCtx.id)
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch packet.Children[1].Tag {
|
||||
case ApplicationModifyResponse:
|
||||
if err = GetLDAPError(packet); err != nil {
|
||||
result.Referral = getReferral(err, packet)
|
||||
|
||||
return result, err
|
||||
}
|
||||
if len(packet.Children) == 3 {
|
||||
for _, child := range packet.Children[2].Children {
|
||||
decodedChild, err := DecodeControl(child)
|
||||
if err != nil {
|
||||
return nil, errors.New("failed to decode child control: " + err.Error())
|
||||
}
|
||||
result.Controls = append(result.Controls, decodedChild)
|
||||
}
|
||||
}
|
||||
}
|
||||
l.Debug.Printf("%d: returning", msgCtx.id)
|
||||
return result, nil
|
||||
}
|
||||
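A small usage sketch for the modify API above, again assuming a bound *ldap.Conn supplied by the caller and a hypothetical target entry:

```
package example

import (
	ldap "github.com/go-ldap/ldap/v3"
)

// updateContact replaces mail and adds a description on one entry.
// Passing nil attaches no request controls.
func updateContact(l *ldap.Conn) error {
	req := ldap.NewModifyRequest("uid=someone,dc=example,dc=org", nil)
	req.Replace("mail", []string{"someone@example.org"})
	req.Add("description", []string{"updated via ModifyRequest"})
	return l.Modify(req)
}
```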
119
vendor/github.com/go-ldap/ldap/v3/passwdmodify.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
const (
|
||||
passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1"
|
||||
)
|
||||
|
||||
// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt
|
||||
type PasswordModifyRequest struct {
|
||||
// UserIdentity is an optional string representation of the user associated with the request.
|
||||
// This string may or may not be an LDAPDN [RFC2253].
|
||||
// If no UserIdentity field is present, the request acts upon the password of the user currently associated with the LDAP session.
|
||||
UserIdentity string
|
||||
// OldPassword, if present, contains the user's current password
|
||||
OldPassword string
|
||||
// NewPassword, if present, contains the desired password for this user
|
||||
NewPassword string
|
||||
}
|
||||
|
||||
// PasswordModifyResult holds the server response to a PasswordModifyRequest
|
||||
type PasswordModifyResult struct {
|
||||
// GeneratedPassword holds a password generated by the server, if present
|
||||
GeneratedPassword string
|
||||
// Referral is the returned referral
|
||||
Referral string
|
||||
}
|
||||
|
||||
func (req *PasswordModifyRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation")
|
||||
pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID"))
|
||||
|
||||
extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request")
|
||||
passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request")
|
||||
if req.UserIdentity != "" {
|
||||
passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.UserIdentity, "User Identity"))
|
||||
}
|
||||
if req.OldPassword != "" {
|
||||
passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, req.OldPassword, "Old Password"))
|
||||
}
|
||||
if req.NewPassword != "" {
|
||||
passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, req.NewPassword, "New Password"))
|
||||
}
|
||||
extendedRequestValue.AppendChild(passwordModifyRequestValue)
|
||||
|
||||
pkt.AppendChild(extendedRequestValue)
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewPasswordModifyRequest creates a new PasswordModifyRequest
|
||||
//
|
||||
// According to RFC 3062 (https://tools.ietf.org/html/rfc3062):
|
||||
// userIdentity is a string representing the user associated with the request.
|
||||
// This string may or may not be an LDAPDN (RFC 2253).
|
||||
// If userIdentity is empty then the operation will act on the user associated
|
||||
// with the session.
|
||||
//
|
||||
// oldPassword is the user's current password; it can be empty or it can be
|
||||
// required, depending on the session user's access rights (usually an administrator
|
||||
// can change a user's password without knowing the current one) and the
|
||||
// password policy (see the pwdSafeModify password policy attribute).
|
||||
//
|
||||
// newPassword is the user's desired password. If empty, the server can return
|
||||
// an error or generate a new password that will be available in the
|
||||
// PasswordModifyResult.GeneratedPassword
|
||||
func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest {
|
||||
return &PasswordModifyRequest{
|
||||
UserIdentity: userIdentity,
|
||||
OldPassword: oldPassword,
|
||||
NewPassword: newPassword,
|
||||
}
|
||||
}
|
||||
|
||||
// PasswordModify performs the modification request
|
||||
func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) {
|
||||
msgCtx, err := l.doRequest(passwordModifyRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &PasswordModifyResult{}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationExtendedResponse {
|
||||
if err = GetLDAPError(packet); err != nil {
|
||||
result.Referral = getReferral(err, packet)
|
||||
|
||||
return result, err
|
||||
}
|
||||
} else {
|
||||
return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag))
|
||||
}
|
||||
|
||||
extendedResponse := packet.Children[1]
|
||||
for _, child := range extendedResponse.Children {
|
||||
if child.Tag == ber.TagEmbeddedPDV {
|
||||
passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes())
|
||||
if len(passwordModifyResponseValue.Children) == 1 {
|
||||
if passwordModifyResponseValue.Children[0].Tag == ber.TagEOC {
|
||||
result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
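A sketch of the password modify extended operation defined above: with an empty userIdentity the operation applies to the bound user, and with an empty newPassword the server may generate one and return it in GeneratedPassword. The *ldap.Conn is assumed to be bound already:

```
package example

import (
	"fmt"

	ldap "github.com/go-ldap/ldap/v3"
)

// resetOwnPassword asks the server to pick a new password for the bound user.
func resetOwnPassword(l *ldap.Conn, oldPassword string) (string, error) {
	req := ldap.NewPasswordModifyRequest("", oldPassword, "")
	res, err := l.PasswordModify(req)
	if err != nil {
		return "", fmt.Errorf("password modify failed: %w", err)
	}
	return res.GeneratedPassword, nil
}
```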
110
vendor/github.com/go-ldap/ldap/v3/request.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
var (
|
||||
errRespChanClosed = errors.New("ldap: response channel closed")
|
||||
errCouldNotRetMsg = errors.New("ldap: could not retrieve message")
|
||||
// ErrNilConnection is returned if doRequest is called with a nil connection.
|
||||
ErrNilConnection = errors.New("ldap: conn is nil, expected net.Conn")
|
||||
)
|
||||
|
||||
type request interface {
|
||||
appendTo(*ber.Packet) error
|
||||
}
|
||||
|
||||
type requestFunc func(*ber.Packet) error
|
||||
|
||||
func (f requestFunc) appendTo(p *ber.Packet) error {
|
||||
return f(p)
|
||||
}
|
||||
|
||||
func (l *Conn) doRequest(req request) (*messageContext, error) {
|
||||
if l == nil || l.conn == nil {
|
||||
return nil, ErrNilConnection
|
||||
}
|
||||
|
||||
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
|
||||
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
|
||||
if err := req.appendTo(packet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if l.Debug {
|
||||
l.Debug.PrintPacket(packet)
|
||||
}
|
||||
|
||||
msgCtx, err := l.sendMessage(packet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.Debug.Printf("%d: returning", msgCtx.id)
|
||||
return msgCtx, nil
|
||||
}
|
||||
|
||||
func (l *Conn) readPacket(msgCtx *messageContext) (*ber.Packet, error) {
|
||||
l.Debug.Printf("%d: waiting for response", msgCtx.id)
|
||||
packetResponse, ok := <-msgCtx.responses
|
||||
if !ok {
|
||||
return nil, NewError(ErrorNetwork, errRespChanClosed)
|
||||
}
|
||||
packet, err := packetResponse.ReadPacket()
|
||||
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if packet == nil {
|
||||
return nil, NewError(ErrorNetwork, errCouldNotRetMsg)
|
||||
}
|
||||
|
||||
if l.Debug {
|
||||
if err = addLDAPDescriptions(packet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.Debug.PrintPacket(packet)
|
||||
}
|
||||
return packet, nil
|
||||
}
|
||||
|
||||
func getReferral(err error, packet *ber.Packet) (referral string) {
|
||||
if !IsErrorWithCode(err, LDAPResultReferral) {
|
||||
return ""
|
||||
}
|
||||
|
||||
if len(packet.Children) < 2 {
|
||||
return ""
|
||||
}
|
||||
|
||||
// The packet Tag itself (of child 2) is generally a ber.TagObjectDescriptor with referrals; however, OpenLDAP
|
||||
// seemingly returns a ber.TagGeneralizedTime. Every currently tested LDAP server which returns referrals returns
|
||||
// an ASN.1 BER packet with the Type of ber.TypeConstructed and Class of ber.ClassApplication however. Thus this
|
||||
// check expressly checks these fields instead.
|
||||
//
|
||||
// Related Issues:
|
||||
// - https://github.com/authelia/authelia/issues/4199 (downstream)
|
||||
if len(packet.Children[1].Children) == 0 || (packet.Children[1].TagType != ber.TypeConstructed || packet.Children[1].ClassType != ber.ClassApplication) {
|
||||
return ""
|
||||
}
|
||||
|
||||
var ok bool
|
||||
|
||||
for _, child := range packet.Children[1].Children {
|
||||
// The referral URI itself should be contained within a child which has a Tag of ber.TagBitString or
|
||||
// ber.TagPrintableString, and the Type of ber.TypeConstructed and the Class of ClassContext. As soon as any of
|
||||
// these conditions is not true we can skip this child.
|
||||
if (child.Tag != ber.TagBitString && child.Tag != ber.TagPrintableString) || child.TagType != ber.TypeConstructed || child.ClassType != ber.ClassContext {
|
||||
continue
|
||||
}
|
||||
|
||||
if referral, ok = child.Children[0].Value.(string); ok {
|
||||
return referral
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
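requestFunc above is the classic Go adapter that lets a bare function satisfy a one-method interface; every request type in this package flows through the same appendTo hook. The pattern in isolation, with hypothetical names since the real types here are unexported:

```
package main

import "fmt"

// greeter plays the role of the request interface: a single method.
type greeter interface {
	greet(name string) string
}

// greetFunc plays the role of requestFunc: a function type whose method
// just calls the function itself.
type greetFunc func(string) string

func (f greetFunc) greet(name string) string { return f(name) }

func main() {
	var g greeter = greetFunc(func(name string) string {
		return "hello, " + name
	})
	fmt.Println(g.greet("ldap"))
}
```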
207
vendor/github.com/go-ldap/ldap/v3/response.go
generated
vendored
Normal file
@@ -0,0 +1,207 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// Response defines an interface to get data from an LDAP server
|
||||
type Response interface {
|
||||
Entry() *Entry
|
||||
Referral() string
|
||||
Controls() []Control
|
||||
Err() error
|
||||
Next() bool
|
||||
}
|
||||
|
||||
type searchResponse struct {
|
||||
conn *Conn
|
||||
ch chan *SearchSingleResult
|
||||
|
||||
entry *Entry
|
||||
referral string
|
||||
controls []Control
|
||||
err error
|
||||
}
|
||||
|
||||
// Entry returns an entry from the given search request
|
||||
func (r *searchResponse) Entry() *Entry {
|
||||
return r.entry
|
||||
}
|
||||
|
||||
// Referral returns a referral from the given search request
|
||||
func (r *searchResponse) Referral() string {
|
||||
return r.referral
|
||||
}
|
||||
|
||||
// Controls returns controls from the given search request
|
||||
func (r *searchResponse) Controls() []Control {
|
||||
return r.controls
|
||||
}
|
||||
|
||||
// Err returns the error if the given search request failed
|
||||
func (r *searchResponse) Err() error {
|
||||
return r.err
|
||||
}
|
||||
|
||||
// Next returns whether another result is available
|
||||
func (r *searchResponse) Next() bool {
|
||||
res, ok := <-r.ch
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if res == nil {
|
||||
return false
|
||||
}
|
||||
r.err = res.Error
|
||||
if r.err != nil {
|
||||
return false
|
||||
}
|
||||
r.entry = res.Entry
|
||||
r.referral = res.Referral
|
||||
r.controls = res.Controls
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *searchResponse) start(ctx context.Context, searchRequest *SearchRequest) {
|
||||
go func() {
|
||||
defer func() {
|
||||
close(r.ch)
|
||||
if err := recover(); err != nil {
|
||||
r.conn.err = fmt.Errorf("ldap: recovered panic in searchResponse: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if r.conn.IsClosing() {
|
||||
return
|
||||
}
|
||||
|
||||
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
|
||||
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, r.conn.nextMessageID(), "MessageID"))
|
||||
// encode search request
|
||||
err := searchRequest.appendTo(packet)
|
||||
if err != nil {
|
||||
r.ch <- &SearchSingleResult{Error: err}
|
||||
return
|
||||
}
|
||||
r.conn.Debug.PrintPacket(packet)
|
||||
|
||||
msgCtx, err := r.conn.sendMessage(packet)
|
||||
if err != nil {
|
||||
r.ch <- &SearchSingleResult{Error: err}
|
||||
return
|
||||
}
|
||||
defer r.conn.finishMessage(msgCtx)
|
||||
|
||||
foundSearchSingleResultDone := false
|
||||
for !foundSearchSingleResultDone {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
r.conn.Debug.Printf("%d: %s", msgCtx.id, ctx.Err().Error())
|
||||
return
|
||||
default:
|
||||
r.conn.Debug.Printf("%d: waiting for response", msgCtx.id)
|
||||
packetResponse, ok := <-msgCtx.responses
|
||||
if !ok {
|
||||
err := NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
|
||||
r.ch <- &SearchSingleResult{Error: err}
|
||||
return
|
||||
}
|
||||
packet, err = packetResponse.ReadPacket()
|
||||
r.conn.Debug.Printf("%d: got response %p", msgCtx.id, packet)
|
||||
if err != nil {
|
||||
r.ch <- &SearchSingleResult{Error: err}
|
||||
return
|
||||
}
|
||||
|
||||
if r.conn.Debug {
|
||||
if err := addLDAPDescriptions(packet); err != nil {
|
||||
r.ch <- &SearchSingleResult{Error: err}
|
||||
return
|
||||
}
|
||||
ber.PrintPacket(packet)
|
||||
}
|
||||
|
||||
switch packet.Children[1].Tag {
|
||||
case ApplicationSearchResultEntry:
|
||||
result := &SearchSingleResult{
|
||||
Entry: &Entry{
|
||||
DN: packet.Children[1].Children[0].Value.(string),
|
||||
Attributes: unpackAttributes(packet.Children[1].Children[1].Children),
|
||||
},
|
||||
}
|
||||
if len(packet.Children) != 3 {
|
||||
r.ch <- result
|
||||
continue
|
||||
}
|
||||
decoded, err := DecodeControl(packet.Children[2].Children[0])
|
||||
if err != nil {
|
||||
werr := fmt.Errorf("failed to decode search result entry: %w", err)
|
||||
result.Error = werr
|
||||
r.ch <- result
|
||||
return
|
||||
}
|
||||
result.Controls = append(result.Controls, decoded)
|
||||
r.ch <- result
|
||||
|
||||
case ApplicationSearchResultDone:
|
||||
if err := GetLDAPError(packet); err != nil {
|
||||
r.ch <- &SearchSingleResult{Error: err}
|
||||
return
|
||||
}
|
||||
if len(packet.Children) == 3 {
|
||||
result := &SearchSingleResult{}
|
||||
for _, child := range packet.Children[2].Children {
|
||||
decodedChild, err := DecodeControl(child)
|
||||
if err != nil {
|
||||
werr := fmt.Errorf("failed to decode child control: %w", err)
|
||||
r.ch <- &SearchSingleResult{Error: werr}
|
||||
return
|
||||
}
|
||||
result.Controls = append(result.Controls, decodedChild)
|
||||
}
|
||||
r.ch <- result
|
||||
}
|
||||
foundSearchSingleResultDone = true
|
||||
|
||||
case ApplicationSearchResultReference:
|
||||
ref := packet.Children[1].Children[0].Value.(string)
|
||||
r.ch <- &SearchSingleResult{Referral: ref}
|
||||
|
||||
case ApplicationIntermediateResponse:
|
||||
decoded, err := DecodeControl(packet.Children[1])
|
||||
if err != nil {
|
||||
werr := fmt.Errorf("failed to decode intermediate response: %w", err)
|
||||
r.ch <- &SearchSingleResult{Error: werr}
|
||||
return
|
||||
}
|
||||
result := &SearchSingleResult{}
|
||||
result.Controls = append(result.Controls, decoded)
|
||||
r.ch <- result
|
||||
|
||||
default:
|
||||
err := fmt.Errorf("unknown tag: %d", packet.Children[1].Tag)
|
||||
r.ch <- &SearchSingleResult{Error: err}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
r.conn.Debug.Printf("%d: returning", msgCtx.id)
|
||||
}()
|
||||
}
|
||||
|
||||
func newSearchResponse(conn *Conn, bufferSize int) *searchResponse {
|
||||
var ch chan *SearchSingleResult
|
||||
if bufferSize > 0 {
|
||||
ch = make(chan *SearchSingleResult, bufferSize)
|
||||
} else {
|
||||
ch = make(chan *SearchSingleResult)
|
||||
}
|
||||
return &searchResponse{
|
||||
conn: conn,
|
||||
ch: ch,
|
||||
}
|
||||
}
|
||||
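The exported Response interface above is consumed as a pull-style iterator: Next blocks for the next result, the getters expose it, and Err reports why iteration stopped. A sketch of the consuming side, assuming a Response obtained from one of the package's streaming search helpers (not shown in this file):

```
package example

import (
	"fmt"

	ldap "github.com/go-ldap/ldap/v3"
)

// drain walks a streaming search response until it is exhausted or fails.
func drain(r ldap.Response) error {
	for r.Next() {
		if entry := r.Entry(); entry != nil {
			fmt.Println(entry.DN)
		}
		if ref := r.Referral(); ref != "" {
			fmt.Println("referral:", ref)
		}
	}
	return r.Err()
}
```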
690
vendor/github.com/go-ldap/ldap/v3/search.go
generated
vendored
Normal file
@@ -0,0 +1,690 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// scope choices
|
||||
const (
|
||||
ScopeBaseObject = 0
|
||||
ScopeSingleLevel = 1
|
||||
ScopeWholeSubtree = 2
|
||||
)
|
||||
|
||||
// ScopeMap contains human readable descriptions of scope choices
|
||||
var ScopeMap = map[int]string{
|
||||
ScopeBaseObject: "Base Object",
|
||||
ScopeSingleLevel: "Single Level",
|
||||
ScopeWholeSubtree: "Whole Subtree",
|
||||
}
|
||||
|
||||
// derefAliases
|
||||
const (
|
||||
NeverDerefAliases = 0
|
||||
DerefInSearching = 1
|
||||
DerefFindingBaseObj = 2
|
||||
DerefAlways = 3
|
||||
)
|
||||
|
||||
// DerefMap contains human readable descriptions of derefAliases choices
|
||||
var DerefMap = map[int]string{
|
||||
NeverDerefAliases: "NeverDerefAliases",
|
||||
DerefInSearching: "DerefInSearching",
|
||||
DerefFindingBaseObj: "DerefFindingBaseObj",
|
||||
DerefAlways: "DerefAlways",
|
||||
}
|
||||
|
||||
// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs.
|
||||
// The map of attributes is accessed in alphabetical order of the keys in order to ensure that, for the
|
||||
// same input map of attributes, the output entry will contain the same order of attributes
|
||||
func NewEntry(dn string, attributes map[string][]string) *Entry {
|
||||
var attributeNames []string
|
||||
for attributeName := range attributes {
|
||||
attributeNames = append(attributeNames, attributeName)
|
||||
}
|
||||
sort.Strings(attributeNames)
|
||||
|
||||
var encodedAttributes []*EntryAttribute
|
||||
for _, attributeName := range attributeNames {
|
||||
encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName]))
|
||||
}
|
||||
return &Entry{
|
||||
DN: dn,
|
||||
Attributes: encodedAttributes,
|
||||
}
|
||||
}
|
||||
|
||||
// Entry represents a single search result entry
|
||||
type Entry struct {
|
||||
// DN is the distinguished name of the entry
|
||||
DN string
|
||||
// Attributes are the returned attributes for the entry
|
||||
Attributes []*EntryAttribute
|
||||
}
|
||||
|
||||
// GetAttributeValues returns the values for the named attribute, or an empty list
|
||||
func (e *Entry) GetAttributeValues(attribute string) []string {
|
||||
for _, attr := range e.Attributes {
|
||||
if attr.Name == attribute {
|
||||
return attr.Values
|
||||
}
|
||||
}
|
||||
return []string{}
|
||||
}
|
||||
|
||||
// GetEqualFoldAttributeValues returns the values for the named attribute, or an
|
||||
// empty list. Attribute matching is done with strings.EqualFold.
|
||||
func (e *Entry) GetEqualFoldAttributeValues(attribute string) []string {
|
||||
for _, attr := range e.Attributes {
|
||||
if strings.EqualFold(attribute, attr.Name) {
|
||||
return attr.Values
|
||||
}
|
||||
}
|
||||
return []string{}
|
||||
}
|
||||
|
||||
// GetRawAttributeValues returns the byte values for the named attribute, or an empty list
|
||||
func (e *Entry) GetRawAttributeValues(attribute string) [][]byte {
|
||||
for _, attr := range e.Attributes {
|
||||
if attr.Name == attribute {
|
||||
return attr.ByteValues
|
||||
}
|
||||
}
|
||||
return [][]byte{}
|
||||
}
|
||||
|
||||
// GetEqualFoldRawAttributeValues returns the byte values for the named attribute, or an empty list
|
||||
func (e *Entry) GetEqualFoldRawAttributeValues(attribute string) [][]byte {
|
||||
for _, attr := range e.Attributes {
|
||||
if strings.EqualFold(attr.Name, attribute) {
|
||||
return attr.ByteValues
|
||||
}
|
||||
}
|
||||
return [][]byte{}
|
||||
}
|
||||
|
||||
// GetAttributeValue returns the first value for the named attribute, or ""
|
||||
func (e *Entry) GetAttributeValue(attribute string) string {
|
||||
values := e.GetAttributeValues(attribute)
|
||||
if len(values) == 0 {
|
||||
return ""
|
||||
}
|
||||
return values[0]
|
||||
}
|
||||
|
||||
// GetEqualFoldAttributeValue returns the first value for the named attribute, or "".
|
||||
// Attribute comparison is done with strings.EqualFold.
|
||||
func (e *Entry) GetEqualFoldAttributeValue(attribute string) string {
|
||||
values := e.GetEqualFoldAttributeValues(attribute)
|
||||
if len(values) == 0 {
|
||||
return ""
|
||||
}
|
||||
return values[0]
|
||||
}
|
||||
|
||||
// GetRawAttributeValue returns the first value for the named attribute, or an empty slice
|
||||
func (e *Entry) GetRawAttributeValue(attribute string) []byte {
|
||||
values := e.GetRawAttributeValues(attribute)
|
||||
if len(values) == 0 {
|
||||
return []byte{}
|
||||
}
|
||||
return values[0]
|
||||
}
|
||||
|
||||
// GetEqualFoldRawAttributeValue returns the first value for the named attribute, or an empty slice
|
||||
func (e *Entry) GetEqualFoldRawAttributeValue(attribute string) []byte {
|
||||
values := e.GetEqualFoldRawAttributeValues(attribute)
|
||||
if len(values) == 0 {
|
||||
return []byte{}
|
||||
}
|
||||
return values[0]
|
||||
}
|
||||
|
||||
// Print outputs a human-readable description
|
||||
func (e *Entry) Print() {
|
||||
fmt.Printf("DN: %s\n", e.DN)
|
||||
for _, attr := range e.Attributes {
|
||||
attr.Print()
|
||||
}
|
||||
}
|
||||
|
||||
// PrettyPrint outputs a human-readable description indenting
|
||||
func (e *Entry) PrettyPrint(indent int) {
|
||||
fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN)
|
||||
for _, attr := range e.Attributes {
|
||||
attr.PrettyPrint(indent + 2)
|
||||
}
|
||||
}
|
||||
|
||||
// Describe the tag to use for struct field tags
|
||||
const decoderTagName = "ldap"
|
||||
|
||||
// readTag will read the reflect.StructField value for
|
||||
// the key defined in decoderTagName. If omitempty is
|
||||
// specified, the field may not be filled.
|
||||
func readTag(f reflect.StructField) (string, bool) {
|
||||
val, ok := f.Tag.Lookup(decoderTagName)
|
||||
if !ok {
|
||||
return f.Name, false
|
||||
}
|
||||
opts := strings.Split(val, ",")
|
||||
omit := false
|
||||
if len(opts) == 2 {
|
||||
omit = opts[1] == "omitempty"
|
||||
}
|
||||
return opts[0], omit
|
||||
}
|
||||
|
||||
// Unmarshal parses the Entry in the value pointed to by i
|
||||
//
|
||||
// Currently, this method only supports struct fields of type
|
||||
// string, []string, int, int64, []byte, *DN, []*DN or time.Time. Other field types
|
||||
// will be ignored. If the field type is a string or int but multiple
|
||||
// attribute values are returned, the first value will be used to fill the field.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// type UserEntry struct {
|
||||
// // Fields with the tag key `dn` are automatically filled with the
|
||||
// // object's distinguishedName. This can be used multiple times.
|
||||
// DN string `ldap:"dn"`
|
||||
//
|
||||
// // This field will be filled with the attribute value for
|
||||
// // userPrincipalName. An attribute can be read into a struct field
|
||||
// // multiple times. Missing attributes will not result in an error.
|
||||
// UserPrincipalName string `ldap:"userPrincipalName"`
|
||||
//
|
||||
// // memberOf may have multiple values. If you don't
|
||||
// // know the number of attribute values at runtime, use a string array.
|
||||
// MemberOf []string `ldap:"memberOf"`
|
||||
//
|
||||
// // ID is an integer value; it will fail unmarshaling when the given
|
||||
// // attribute value cannot be parsed into an integer.
|
||||
// ID int `ldap:"id"`
|
||||
//
|
||||
// // LongID is similar to ID but uses an int64 instead.
|
||||
// LongID int64 `ldap:"longId"`
|
||||
//
|
||||
// // Data is similar to MemberOf a slice containing all attribute
|
||||
// // values.
|
||||
// Data []byte `ldap:"data"`
|
||||
//
|
||||
// // Time is parsed with the generalizedTime spec into a time.Time
|
||||
// Created time.Time `ldap:"createdTimestamp"`
|
||||
//
|
||||
// // *DN is parsed with the ParseDN
|
||||
// Owner *ldap.DN `ldap:"owner"`
|
||||
//
|
||||
// // []*DN is parsed with the ParseDN
|
||||
// Children []*ldap.DN `ldap:"children"`
|
||||
//
|
||||
// // This won't work, as the field is not of type string. For this
|
||||
// // to work, you'll have to temporarily store the result in string
|
||||
// // (or string array) and convert it to the desired type afterwards.
|
||||
// UserAccountControl uint32 `ldap:"userPrincipalName"`
|
||||
// }
|
||||
// user := UserEntry{}
|
||||
//
|
||||
// if err := result.Unmarshal(&user); err != nil {
|
||||
// // ...
|
||||
// }
|
||||
func (e *Entry) Unmarshal(i interface{}) (err error) {
|
||||
// Make sure it's a ptr
|
||||
if vo := reflect.ValueOf(i).Kind(); vo != reflect.Ptr {
|
||||
return fmt.Errorf("ldap: cannot use %s, expected pointer to a struct", vo)
|
||||
}
|
||||
|
||||
sv, st := reflect.ValueOf(i).Elem(), reflect.TypeOf(i).Elem()
|
||||
// Make sure it's pointing to a struct
|
||||
if sv.Kind() != reflect.Struct {
|
||||
return fmt.Errorf("ldap: expected pointer to a struct, got %s", sv.Kind())
|
||||
}
|
||||
|
||||
for n := 0; n < st.NumField(); n++ {
|
||||
// Holds struct field value and type
|
||||
fv, ft := sv.Field(n), st.Field(n)
|
||||
|
||||
// skip unexported fields
|
||||
if ft.PkgPath != "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// omitempty can be safely discarded, as it's not needed when unmarshalling
|
||||
fieldTag, _ := readTag(ft)
|
||||
|
||||
// Fill the field with the distinguishedName if the tag key is `dn`
|
||||
if fieldTag == "dn" {
|
||||
fv.SetString(e.DN)
|
||||
continue
|
||||
}
|
||||
|
||||
values := e.GetAttributeValues(fieldTag)
|
||||
if len(values) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
switch fv.Interface().(type) {
|
||||
case []string:
|
||||
for _, item := range values {
|
||||
fv.Set(reflect.Append(fv, reflect.ValueOf(item)))
|
||||
}
|
||||
case string:
|
||||
fv.SetString(values[0])
|
||||
case []byte:
|
||||
fv.SetBytes([]byte(values[0]))
|
||||
case int, int64:
|
||||
intVal, err := strconv.ParseInt(values[0], 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ldap: could not parse value '%s' into int field", values[0])
|
||||
}
|
||||
fv.SetInt(intVal)
|
||||
case time.Time:
|
||||
t, err := ber.ParseGeneralizedTime([]byte(values[0]))
|
||||
if err != nil {
|
||||
return fmt.Errorf("ldap: could not parse value '%s' into time.Time field", values[0])
|
||||
}
|
||||
fv.Set(reflect.ValueOf(t))
|
||||
case *DN:
|
||||
dn, err := ParseDN(values[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("ldap: could not parse value '%s' into *ldap.DN field", values[0])
|
||||
}
|
||||
fv.Set(reflect.ValueOf(dn))
|
||||
case []*DN:
|
||||
for _, item := range values {
|
||||
dn, err := ParseDN(item)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ldap: could not parse value '%s' into *ldap.DN field", item)
|
||||
}
|
||||
fv.Set(reflect.Append(fv, reflect.ValueOf(dn)))
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("ldap: expected field to be of type string, []string, int, int64, []byte, *DN, []*DN or time.Time, got %v", ft.Type)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair
|
||||
func NewEntryAttribute(name string, values []string) *EntryAttribute {
|
||||
var bytes [][]byte
|
||||
for _, value := range values {
|
||||
bytes = append(bytes, []byte(value))
|
||||
}
|
||||
return &EntryAttribute{
|
||||
Name: name,
|
||||
Values: values,
|
||||
ByteValues: bytes,
|
||||
}
|
||||
}
|
||||
|
||||
// EntryAttribute holds a single attribute
|
||||
type EntryAttribute struct {
|
||||
// Name is the name of the attribute
|
||||
Name string
|
||||
// Values contain the string values of the attribute
|
||||
Values []string
|
||||
// ByteValues contain the raw values of the attribute
|
||||
ByteValues [][]byte
|
||||
}
|
||||
|
||||
// Print outputs a human-readable description
|
||||
func (e *EntryAttribute) Print() {
|
||||
fmt.Printf("%s: %s\n", e.Name, e.Values)
|
||||
}
|
||||
|
||||
// PrettyPrint outputs a human-readable description with indenting
|
||||
func (e *EntryAttribute) PrettyPrint(indent int) {
|
||||
fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values)
|
||||
}
|
||||
|
||||
// SearchResult holds the server's response to a search request
|
||||
type SearchResult struct {
|
||||
// Entries are the returned entries
|
||||
Entries []*Entry
|
||||
// Referrals are the returned referrals
|
||||
Referrals []string
|
||||
// Controls are the returned controls
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
// Print outputs a human-readable description
|
||||
func (s *SearchResult) Print() {
|
||||
for _, entry := range s.Entries {
|
||||
entry.Print()
|
||||
}
|
||||
}
|
||||
|
||||
// PrettyPrint outputs a human-readable description with indenting
|
||||
func (s *SearchResult) PrettyPrint(indent int) {
|
||||
for _, entry := range s.Entries {
|
||||
entry.PrettyPrint(indent)
|
||||
}
|
||||
}
|
||||
|
||||
// appendTo appends all entries, referrals and controls of `s` to `r`
|
||||
func (s *SearchResult) appendTo(r *SearchResult) {
|
||||
r.Entries = append(r.Entries, s.Entries...)
|
||||
r.Referrals = append(r.Referrals, s.Referrals...)
|
||||
r.Controls = append(r.Controls, s.Controls...)
|
||||
}
|
||||
|
||||
// SearchSingleResult holds the server's single entry response to a search request
|
||||
type SearchSingleResult struct {
|
||||
// Entry is the returned entry
|
||||
Entry *Entry
|
||||
// Referral is the returned referral
|
||||
Referral string
|
||||
// Controls are the returned controls
|
||||
Controls []Control
|
||||
// Error is set when the search request failed
|
||||
Error error
|
||||
}
|
||||
|
||||
// Print outputs a human-readable description
|
||||
func (s *SearchSingleResult) Print() {
|
||||
s.Entry.Print()
|
||||
}
|
||||
|
||||
// PrettyPrint outputs a human-readable description with indenting
|
||||
func (s *SearchSingleResult) PrettyPrint(indent int) {
|
||||
s.Entry.PrettyPrint(indent)
|
||||
}
|
||||
|
||||
// SearchRequest represents a search request to send to the server
|
||||
type SearchRequest struct {
|
||||
BaseDN string
|
||||
Scope int
|
||||
DerefAliases int
|
||||
SizeLimit int
|
||||
TimeLimit int
|
||||
TypesOnly bool
|
||||
Filter string
|
||||
Attributes []string
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
func (req *SearchRequest) appendTo(envelope *ber.Packet) error {
|
||||
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request")
|
||||
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.BaseDN, "Base DN"))
|
||||
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.Scope), "Scope"))
|
||||
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.DerefAliases), "Deref Aliases"))
|
||||
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.SizeLimit), "Size Limit"))
|
||||
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.TimeLimit), "Time Limit"))
|
||||
pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.TypesOnly, "Types Only"))
|
||||
// compile and encode filter
|
||||
filterPacket, err := CompileFilter(req.Filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pkt.AppendChild(filterPacket)
|
||||
// encode attributes
|
||||
attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
|
||||
for _, attribute := range req.Attributes {
|
||||
attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
|
||||
}
|
||||
pkt.AppendChild(attributesPacket)
|
||||
|
||||
envelope.AppendChild(pkt)
|
||||
if len(req.Controls) > 0 {
|
||||
envelope.AppendChild(encodeControls(req.Controls))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewSearchRequest creates a new search request
|
||||
func NewSearchRequest(
|
||||
BaseDN string,
|
||||
Scope, DerefAliases, SizeLimit, TimeLimit int,
|
||||
TypesOnly bool,
|
||||
Filter string,
|
||||
Attributes []string,
|
||||
Controls []Control,
|
||||
) *SearchRequest {
|
||||
return &SearchRequest{
|
||||
BaseDN: BaseDN,
|
||||
Scope: Scope,
|
||||
DerefAliases: DerefAliases,
|
||||
SizeLimit: SizeLimit,
|
||||
TimeLimit: TimeLimit,
|
||||
TypesOnly: TypesOnly,
|
||||
Filter: Filter,
|
||||
Attributes: Attributes,
|
||||
Controls: Controls,
|
||||
}
|
||||
}
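// exampleSearch is an illustrative sketch added by the editor; it is not part of
// the vendored file. It builds a request with NewSearchRequest and runs it over
// an already established connection. The base DN, filter and attribute list are
// assumptions made for the example.
func exampleSearch(l *Conn) (*SearchResult, error) {
	req := NewSearchRequest(
		"dc=example,dc=com", // assumed base DN
		ScopeWholeSubtree, NeverDerefAliases,
		0, 0, // no size or time limit
		false,
		"(objectClass=person)", // assumed filter
		[]string{"cn", "mail"},
		nil,
	)
	return l.Search(req)
}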
|
||||
|
||||
// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the
|
||||
// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically.
|
||||
// The following four cases are possible given the arguments:
|
||||
// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size
|
||||
// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries
|
||||
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request
|
||||
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries
|
||||
//
|
||||
// A requested pagingSize of 0 is interpreted as no limit by LDAP servers.
|
||||
func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) {
|
||||
var pagingControl *ControlPaging
|
||||
|
||||
control := FindControl(searchRequest.Controls, ControlTypePaging)
|
||||
if control == nil {
|
||||
pagingControl = NewControlPaging(pagingSize)
|
||||
searchRequest.Controls = append(searchRequest.Controls, pagingControl)
|
||||
} else {
|
||||
castControl, ok := control.(*ControlPaging)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected paging control to be of type *ControlPaging, got %v", control)
|
||||
}
|
||||
if castControl.PagingSize != pagingSize {
|
||||
return nil, fmt.Errorf("paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize)
|
||||
}
|
||||
pagingControl = castControl
|
||||
}
|
||||
|
||||
searchResult := new(SearchResult)
|
||||
for {
|
||||
result, err := l.Search(searchRequest)
|
||||
if result != nil {
|
||||
result.appendTo(searchResult)
|
||||
} else {
|
||||
if err == nil {
|
||||
// Guard against the unlikely case where something strange happened and
|
||||
// there is no packet, but also no error.
|
||||
return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received"))
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
// If an error occurred, all results that have been received so far will be returned
|
||||
return searchResult, err
|
||||
}
|
||||
|
||||
l.Debug.Printf("Looking for Paging Control...")
|
||||
pagingResult := FindControl(result.Controls, ControlTypePaging)
|
||||
if pagingResult == nil {
|
||||
pagingControl = nil
|
||||
l.Debug.Printf("Could not find paging control. Breaking...")
|
||||
break
|
||||
}
|
||||
|
||||
cookie := pagingResult.(*ControlPaging).Cookie
|
||||
if len(cookie) == 0 {
|
||||
pagingControl = nil
|
||||
l.Debug.Printf("Could not find cookie. Breaking...")
|
||||
break
|
||||
}
|
||||
pagingControl.SetCookie(cookie)
|
||||
}
|
||||
|
||||
if pagingControl != nil {
|
||||
l.Debug.Printf("Abandoning Paging...")
|
||||
pagingControl.PagingSize = 0
|
||||
if _, err := l.Search(searchRequest); err != nil {
|
||||
return searchResult, err
|
||||
}
|
||||
}
|
||||
|
||||
return searchResult, nil
|
||||
}
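// examplePagedSearch is an illustrative sketch added by the editor; it is not
// part of the vendored file. SearchWithPaging attaches the paging control itself,
// so a plain request is enough. The page size of 100 is an arbitrary assumption.
func examplePagedSearch(l *Conn, req *SearchRequest) ([]*Entry, error) {
	res, err := l.SearchWithPaging(req, 100)
	if err != nil {
		return nil, err
	}
	return res.Entries, nil
}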
|
||||
|
||||
// Search performs the given search request
|
||||
func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) {
|
||||
msgCtx, err := l.doRequest(searchRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
result := &SearchResult{
|
||||
Entries: make([]*Entry, 0),
|
||||
Referrals: make([]string, 0),
|
||||
Controls: make([]Control, 0),
|
||||
}
|
||||
|
||||
for {
|
||||
packet, err := l.readPacket(msgCtx)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
switch packet.Children[1].Tag {
|
||||
case 4:
|
||||
entry := &Entry{
|
||||
DN: packet.Children[1].Children[0].Value.(string),
|
||||
Attributes: unpackAttributes(packet.Children[1].Children[1].Children),
|
||||
}
|
||||
result.Entries = append(result.Entries, entry)
|
||||
case 5:
|
||||
err := GetLDAPError(packet)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
if len(packet.Children) == 3 {
|
||||
for _, child := range packet.Children[2].Children {
|
||||
decodedChild, err := DecodeControl(child)
|
||||
if err != nil {
|
||||
return result, fmt.Errorf("failed to decode child control: %s", err)
|
||||
}
|
||||
result.Controls = append(result.Controls, decodedChild)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
case 19:
|
||||
result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SearchAsync performs a search request and returns all search results asynchronously.
|
||||
// This means you get all results until an error happens (or the search successfully finished),
|
||||
// e.g. for size- or time-limited requests all results are received until the limit is reached.
|
||||
// To stop the search, call cancel function of the context.
|
||||
func (l *Conn) SearchAsync(
|
||||
ctx context.Context, searchRequest *SearchRequest, bufferSize int) Response {
|
||||
r := newSearchResponse(l, bufferSize)
|
||||
r.start(ctx, searchRequest)
|
||||
return r
|
||||
}
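// exampleSearchAsync is an illustrative sketch added by the editor; it is not
// part of the vendored file. It drains an asynchronous search, assuming the
// package's Response value exposes Next, Entry and Err for iteration.
func exampleSearchAsync(ctx context.Context, l *Conn, req *SearchRequest) error {
	r := l.SearchAsync(ctx, req, 64)
	for r.Next() {
		// Each entry is delivered as soon as it arrives from the server.
		r.Entry().Print()
	}
	return r.Err()
}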
|
||||
|
||||
// Syncrepl is a short name for the LDAP Sync Replication engine that works on the
|
||||
// consumer side. It performs a persistent search and returns an entry
|
||||
// when the entry is updated on the server side.
|
||||
// To stop the search, call cancel function of the context.
|
||||
func (l *Conn) Syncrepl(
|
||||
ctx context.Context, searchRequest *SearchRequest, bufferSize int,
|
||||
mode ControlSyncRequestMode, cookie []byte, reloadHint bool,
|
||||
) Response {
|
||||
control := NewControlSyncRequest(mode, cookie, reloadHint)
|
||||
searchRequest.Controls = append(searchRequest.Controls, control)
|
||||
r := newSearchResponse(l, bufferSize)
|
||||
r.start(ctx, searchRequest)
|
||||
return r
|
||||
}
|
||||
|
||||
// unpackAttributes extracts all given LDAP attributes and their values
|
||||
// from the ber.Packet
|
||||
func unpackAttributes(children []*ber.Packet) []*EntryAttribute {
|
||||
entries := make([]*EntryAttribute, len(children))
|
||||
for i, child := range children {
|
||||
length := len(child.Children[1].Children)
|
||||
entry := &EntryAttribute{
|
||||
Name: child.Children[0].Value.(string),
|
||||
// pre-allocate the slice since we can determine
|
||||
// the number of attributes at this point
|
||||
Values: make([]string, length),
|
||||
ByteValues: make([][]byte, length),
|
||||
}
|
||||
|
||||
for i, value := range child.Children[1].Children {
|
||||
entry.ByteValues[i] = value.ByteValue
|
||||
entry.Values[i] = value.Value.(string)
|
||||
}
|
||||
entries[i] = entry
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
// DirSync does a Search with dirSync Control.
|
||||
func (l *Conn) DirSync(
|
||||
searchRequest *SearchRequest, flags int64, maxAttrCount int64, cookie []byte,
|
||||
) (*SearchResult, error) {
|
||||
control := FindControl(searchRequest.Controls, ControlTypeDirSync)
|
||||
if control == nil {
|
||||
c := NewRequestControlDirSync(flags, maxAttrCount, cookie)
|
||||
searchRequest.Controls = append(searchRequest.Controls, c)
|
||||
} else {
|
||||
c := control.(*ControlDirSync)
|
||||
if c.Flags != flags {
|
||||
return nil, fmt.Errorf("flags given in search request (%d) conflicts with flags given in search call (%d)", c.Flags, flags)
|
||||
}
|
||||
if c.MaxAttrCount != maxAttrCount {
|
||||
return nil, fmt.Errorf("MaxAttrCnt given in search request (%d) conflicts with maxAttrCount given in search call (%d)", c.MaxAttrCount, maxAttrCount)
|
||||
}
|
||||
}
|
||||
searchResult, err := l.Search(searchRequest)
|
||||
l.Debug.Printf("Looking for result...")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if searchResult == nil {
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: packet not received"))
|
||||
}
|
||||
|
||||
l.Debug.Printf("Looking for DirSync Control...")
|
||||
resultControl := FindControl(searchResult.Controls, ControlTypeDirSync)
|
||||
if resultControl == nil {
|
||||
l.Debug.Printf("Could not find dirSyncControl control. Breaking...")
|
||||
return searchResult, nil
|
||||
}
|
||||
|
||||
cookie = resultControl.(*ControlDirSync).Cookie
|
||||
if len(cookie) == 0 {
|
||||
l.Debug.Printf("Could not find cookie. Breaking...")
|
||||
return searchResult, nil
|
||||
}
|
||||
|
||||
return searchResult, nil
|
||||
}
|
||||
|
||||
// DirSyncAsync performs a search request and returns all search results
|
||||
// asynchronously. This is efficient when the server returns lots of entries.
|
||||
func (l *Conn) DirSyncAsync(
|
||||
ctx context.Context, searchRequest *SearchRequest, bufferSize int,
|
||||
flags, maxAttrCount int64, cookie []byte,
|
||||
) Response {
|
||||
control := NewRequestControlDirSync(flags, maxAttrCount, cookie)
|
||||
searchRequest.Controls = append(searchRequest.Controls, control)
|
||||
r := newSearchResponse(l, bufferSize)
|
||||
r.start(ctx, searchRequest)
|
||||
return r
|
||||
}
|
||||
38
vendor/github.com/go-ldap/ldap/v3/unbind.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
// ErrConnUnbound is returned when Unbind is called on an already closing connection.
|
||||
var ErrConnUnbound = NewError(ErrorNetwork, errors.New("ldap: connection is closed"))
|
||||
|
||||
type unbindRequest struct{}
|
||||
|
||||
func (unbindRequest) appendTo(envelope *ber.Packet) error {
|
||||
envelope.AppendChild(ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationUnbindRequest, nil, ApplicationMap[ApplicationUnbindRequest]))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unbind will perform an unbind request. The Unbind operation
|
||||
// should be thought of as the "quit" operation.
|
||||
// See https://datatracker.ietf.org/doc/html/rfc4511#section-4.3
|
||||
func (l *Conn) Unbind() error {
|
||||
if l.IsClosing() {
|
||||
return ErrConnUnbound
|
||||
}
|
||||
|
||||
_, err := l.doRequest(unbindRequest{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sending an unbindRequest will make the connection unusable.
|
||||
// Pending requests will fail with:
|
||||
// LDAP Result Code 200 "Network Error": ldap: response channel closed
|
||||
l.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
91
vendor/github.com/go-ldap/ldap/v3/whoami.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
package ldap
|
||||
|
||||
// This file contains the "Who Am I?" extended operation as specified in rfc 4532
|
||||
//
|
||||
// https://tools.ietf.org/html/rfc4532
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
ber "github.com/go-asn1-ber/asn1-ber"
|
||||
)
|
||||
|
||||
type whoAmIRequest bool
|
||||
|
||||
// WhoAmIResult is returned by the WhoAmI() call
|
||||
type WhoAmIResult struct {
|
||||
AuthzID string
|
||||
}
|
||||
|
||||
func (r whoAmIRequest) encode() (*ber.Packet, error) {
|
||||
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Who Am I? Extended Operation")
|
||||
request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, ControlTypeWhoAmI, "Extended Request Name: Who Am I? OID"))
|
||||
return request, nil
|
||||
}
|
||||
|
||||
// WhoAmI returns the authzId the server thinks we are. You may pass controls
|
||||
// like a Proxied Authorization control
|
||||
func (l *Conn) WhoAmI(controls []Control) (*WhoAmIResult, error) {
|
||||
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
|
||||
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
|
||||
req := whoAmIRequest(true)
|
||||
encodedWhoAmIRequest, err := req.encode()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
packet.AppendChild(encodedWhoAmIRequest)
|
||||
|
||||
if len(controls) != 0 {
|
||||
packet.AppendChild(encodeControls(controls))
|
||||
}
|
||||
|
||||
l.Debug.PrintPacket(packet)
|
||||
|
||||
msgCtx, err := l.sendMessage(packet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
result := &WhoAmIResult{}
|
||||
|
||||
l.Debug.Printf("%d: waiting for response", msgCtx.id)
|
||||
packetResponse, ok := <-msgCtx.responses
|
||||
if !ok {
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
|
||||
}
|
||||
packet, err = packetResponse.ReadPacket()
|
||||
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if packet == nil {
|
||||
return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve message"))
|
||||
}
|
||||
|
||||
if l.Debug {
|
||||
if err := addLDAPDescriptions(packet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ber.PrintPacket(packet)
|
||||
}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationExtendedResponse {
|
||||
if err := GetLDAPError(packet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag))
|
||||
}
|
||||
|
||||
extendedResponse := packet.Children[1]
|
||||
for _, child := range extendedResponse.Children {
|
||||
if child.Tag == 11 {
|
||||
result.AuthzID = ber.DecodeString(child.Data.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
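// exampleWhoAmI is an illustrative sketch added by the editor; it is not part of
// the vendored file. It asks the server which authorization identity the current
// bind is operating as, passing no additional controls.
func exampleWhoAmI(l *Conn) (string, error) {
	res, err := l.WhoAmI(nil)
	if err != nil {
		return "", err
	}
	// An empty AuthzID means the server treats the session as anonymous.
	return res.AuthzID, nil
}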
|
||||
10
vendor/github.com/google/uuid/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
# Changelog
|
||||
|
||||
## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
|
||||
|
||||
## Changelog
|
||||
26
vendor/github.com/google/uuid/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
# How to contribute
|
||||
|
||||
We definitely welcome patches and contribution to this project!
|
||||
|
||||
### Tips
|
||||
|
||||
Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
|
||||
|
||||
Always try to include a test case! If it is not possible or not necessary,
|
||||
please explain why in the pull request description.
|
||||
|
||||
### Releasing
|
||||
|
||||
Commits that would precipitate a SemVer change, as described in the Conventional
|
||||
Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
|
||||
to create a release candidate pull request. Once submitted, `release-please`
|
||||
will create a release.
|
||||
|
||||
For tips on how to work with `release-please`, see its documentation.
|
||||
|
||||
### Legal requirements
|
||||
|
||||
In order to protect both you and ourselves, you will need to sign the
|
||||
[Contributor License Agreement](https://cla.developers.google.com/clas).
|
||||
|
||||
You may have already signed it for other Google projects.
|
||||
9
vendor/github.com/google/uuid/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
Paul Borman <borman@google.com>
|
||||
bmatsuo
|
||||
shawnps
|
||||
theory
|
||||
jboverfelt
|
||||
dsymonds
|
||||
cd1
|
||||
wallclockbuilder
|
||||
dansouza
|
||||
27
vendor/github.com/google/uuid/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009,2014 Google Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
21
vendor/github.com/google/uuid/README.md
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
# uuid
|
||||
The uuid package generates and inspects UUIDs based on
|
||||
[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
|
||||
and DCE 1.1: Authentication and Security Services.
|
||||
|
||||
This package is based on the github.com/pborman/uuid package (previously named
|
||||
code.google.com/p/go-uuid). It differs from these earlier packages in that
|
||||
a UUID is a 16 byte array rather than a byte slice. One loss due to this
|
||||
change is the ability to represent an invalid UUID (vs a NIL UUID).
|
||||
|
||||
###### Install
|
||||
```sh
|
||||
go get github.com/google/uuid
|
||||
```
|
||||
|
||||
###### Documentation
|
||||
[](https://pkg.go.dev/github.com/google/uuid)
|
||||
|
||||
Full `go doc` style documentation for the package can be viewed online without
|
||||
installing this package by using the GoDoc site here:
|
||||
http://pkg.go.dev/github.com/google/uuid
|
||||
80
vendor/github.com/google/uuid/dce.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// A Domain represents a Version 2 domain
|
||||
type Domain byte
|
||||
|
||||
// Domain constants for DCE Security (Version 2) UUIDs.
|
||||
const (
|
||||
Person = Domain(0)
|
||||
Group = Domain(1)
|
||||
Org = Domain(2)
|
||||
)
|
||||
|
||||
// NewDCESecurity returns a DCE Security (Version 2) UUID.
|
||||
//
|
||||
// The domain should be one of Person, Group or Org.
|
||||
// On a POSIX system the id should be the user's UID for the Person
|
||||
// domain and the user's GID for the Group. The meaning of id for
|
||||
// the domain Org or on non-POSIX systems is site defined.
|
||||
//
|
||||
// For a given domain/id pair the same token may be returned for up to
|
||||
// 7 minutes and 10 seconds.
|
||||
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
|
||||
uuid, err := NewUUID()
|
||||
if err == nil {
|
||||
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
|
||||
uuid[9] = byte(domain)
|
||||
binary.BigEndian.PutUint32(uuid[0:], id)
|
||||
}
|
||||
return uuid, err
|
||||
}
|
||||
|
||||
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
|
||||
// domain with the id returned by os.Getuid.
|
||||
//
|
||||
// NewDCESecurity(Person, uint32(os.Getuid()))
|
||||
func NewDCEPerson() (UUID, error) {
|
||||
return NewDCESecurity(Person, uint32(os.Getuid()))
|
||||
}
|
||||
|
||||
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
|
||||
// domain with the id returned by os.Getgid.
|
||||
//
|
||||
// NewDCESecurity(Group, uint32(os.Getgid()))
|
||||
func NewDCEGroup() (UUID, error) {
|
||||
return NewDCESecurity(Group, uint32(os.Getgid()))
|
||||
}
|
||||
|
||||
// Domain returns the domain for a Version 2 UUID. Domains are only defined
|
||||
// for Version 2 UUIDs.
|
||||
func (uuid UUID) Domain() Domain {
|
||||
return Domain(uuid[9])
|
||||
}
|
||||
|
||||
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
|
||||
// UUIDs.
|
||||
func (uuid UUID) ID() uint32 {
|
||||
return binary.BigEndian.Uint32(uuid[0:4])
|
||||
}
|
||||
|
||||
func (d Domain) String() string {
|
||||
switch d {
|
||||
case Person:
|
||||
return "Person"
|
||||
case Group:
|
||||
return "Group"
|
||||
case Org:
|
||||
return "Org"
|
||||
}
|
||||
return fmt.Sprintf("Domain%d", int(d))
|
||||
}
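// exampleDCE is an illustrative sketch added by the editor; it is not part of
// the vendored file. It creates a DCE Security UUID for the current user and
// reads the embedded domain and id back out of it.
func exampleDCE() (Domain, uint32, error) {
	u, err := NewDCEPerson()
	if err != nil {
		return 0, 0, err
	}
	return u.Domain(), u.ID(), nil
}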
|
||||
12
vendor/github.com/google/uuid/doc.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package uuid generates and inspects UUIDs.
|
||||
//
|
||||
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
|
||||
// Services.
|
||||
//
|
||||
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
|
||||
// maps or compared directly.
|
||||
package uuid
|
||||
53
vendor/github.com/google/uuid/hash.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"hash"
|
||||
)
|
||||
|
||||
// Well known namespace IDs and UUIDs
|
||||
var (
|
||||
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
|
||||
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
|
||||
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
|
||||
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
|
||||
Nil UUID // empty UUID, all zeros
|
||||
)
|
||||
|
||||
// NewHash returns a new UUID derived from the hash of space concatenated with
|
||||
// data generated by h. The hash should be at least 16 bytes in length. The
|
||||
// first 16 bytes of the hash are used to form the UUID. The version of the
|
||||
// UUID will be the lower 4 bits of version. NewHash is used to implement
|
||||
// NewMD5 and NewSHA1.
|
||||
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
|
||||
h.Reset()
|
||||
h.Write(space[:]) //nolint:errcheck
|
||||
h.Write(data) //nolint:errcheck
|
||||
s := h.Sum(nil)
|
||||
var uuid UUID
|
||||
copy(uuid[:], s)
|
||||
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
|
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
|
||||
return uuid
|
||||
}
|
||||
|
||||
// NewMD5 returns a new MD5 (Version 3) UUID based on the
|
||||
// supplied name space and data. It is the same as calling:
|
||||
//
|
||||
// NewHash(md5.New(), space, data, 3)
|
||||
func NewMD5(space UUID, data []byte) UUID {
|
||||
return NewHash(md5.New(), space, data, 3)
|
||||
}
|
||||
|
||||
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
|
||||
// supplied name space and data. It is the same as calling:
|
||||
//
|
||||
// NewHash(sha1.New(), space, data, 5)
|
||||
func NewSHA1(space UUID, data []byte) UUID {
|
||||
return NewHash(sha1.New(), space, data, 5)
|
||||
}
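// exampleNameBased is an illustrative sketch added by the editor; it is not part
// of the vendored file. Name-based UUIDs are deterministic: hashing the same name
// in the same namespace always yields the same UUID.
func exampleNameBased() bool {
	a := NewSHA1(NameSpaceDNS, []byte("example.com"))
	b := NewSHA1(NameSpaceDNS, []byte("example.com"))
	return a == b // always true for identical namespace and name
}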
|
||||
38
vendor/github.com/google/uuid/marshal.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import "fmt"
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (uuid UUID) MarshalText() ([]byte, error) {
|
||||
var js [36]byte
|
||||
encodeHex(js[:], uuid)
|
||||
return js[:], nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (uuid *UUID) UnmarshalText(data []byte) error {
|
||||
id, err := ParseBytes(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*uuid = id
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler.
|
||||
func (uuid UUID) MarshalBinary() ([]byte, error) {
|
||||
return uuid[:], nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
||||
func (uuid *UUID) UnmarshalBinary(data []byte) error {
|
||||
if len(data) != 16 {
|
||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
|
||||
}
|
||||
copy(uuid[:], data)
|
||||
return nil
|
||||
}
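// exampleTextRoundTrip is an illustrative sketch added by the editor; it is not
// part of the vendored file. Because UUID implements encoding.TextMarshaler and
// encoding.TextUnmarshaler, it round-trips through its canonical string form,
// which is also what encoders such as encoding/json use for UUID values.
func exampleTextRoundTrip(u UUID) (UUID, error) {
	text, err := u.MarshalText() // "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
	if err != nil {
		return UUID{}, err
	}
	var out UUID
	err = out.UnmarshalText(text)
	return out, err
}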
|
||||
90
vendor/github.com/google/uuid/node.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
nodeMu sync.Mutex
|
||||
ifname string // name of interface being used
|
||||
nodeID [6]byte // hardware for version 1 UUIDs
|
||||
zeroID [6]byte // nodeID with only 0's
|
||||
)
|
||||
|
||||
// NodeInterface returns the name of the interface from which the NodeID was
|
||||
// derived. The interface "user" is returned if the NodeID was set by
|
||||
// SetNodeID.
|
||||
func NodeInterface() string {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
return ifname
|
||||
}
|
||||
|
||||
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
|
||||
// If name is "" then the first usable interface found will be used or a random
|
||||
// Node ID will be generated. If a named interface cannot be found then false
|
||||
// is returned.
|
||||
//
|
||||
// SetNodeInterface never fails when name is "".
|
||||
func SetNodeInterface(name string) bool {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
return setNodeInterface(name)
|
||||
}
|
||||
|
||||
func setNodeInterface(name string) bool {
|
||||
iname, addr := getHardwareInterface(name) // null implementation for js
|
||||
if iname != "" && addr != nil {
|
||||
ifname = iname
|
||||
copy(nodeID[:], addr)
|
||||
return true
|
||||
}
|
||||
|
||||
// We found no interfaces with a valid hardware address. If name
|
||||
// does not specify a specific interface, generate a random Node ID
|
||||
// (section 4.1.6)
|
||||
if name == "" {
|
||||
ifname = "random"
|
||||
randomBits(nodeID[:])
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
|
||||
// if not already set.
|
||||
func NodeID() []byte {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
if nodeID == zeroID {
|
||||
setNodeInterface("")
|
||||
}
|
||||
nid := nodeID
|
||||
return nid[:]
|
||||
}
|
||||
|
||||
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
|
||||
// of id are used. If id is less than 6 bytes then false is returned and the
|
||||
// Node ID is not set.
|
||||
func SetNodeID(id []byte) bool {
|
||||
if len(id) < 6 {
|
||||
return false
|
||||
}
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
copy(nodeID[:], id)
|
||||
ifname = "user"
|
||||
return true
|
||||
}
|
||||
|
||||
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
|
||||
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
|
||||
func (uuid UUID) NodeID() []byte {
|
||||
var node [6]byte
|
||||
copy(node[:], uuid[10:])
|
||||
return node[:]
|
||||
}
|
||||
12
vendor/github.com/google/uuid/node_js.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2017 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build js
|
||||
|
||||
package uuid
|
||||
|
||||
// getHardwareInterface returns nil values for the JS version of the code.
|
||||
// This removes the "net" dependency, because it is not used in the browser.
|
||||
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
|
||||
func getHardwareInterface(name string) (string, []byte) { return "", nil }
|
||||
33
vendor/github.com/google/uuid/node_net.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
// Copyright 2017 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !js
|
||||
|
||||
package uuid
|
||||
|
||||
import "net"
|
||||
|
||||
var interfaces []net.Interface // cached list of interfaces
|
||||
|
||||
// getHardwareInterface returns the name and hardware address of interface name.
|
||||
// If name is "" then the name and hardware address of one of the system's
|
||||
// interfaces is returned. If no interfaces are found (name does not exist or
|
||||
// there are no interfaces) then "", nil is returned.
|
||||
//
|
||||
// Only addresses of at least 6 bytes are returned.
|
||||
func getHardwareInterface(name string) (string, []byte) {
|
||||
if interfaces == nil {
|
||||
var err error
|
||||
interfaces, err = net.Interfaces()
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
for _, ifs := range interfaces {
|
||||
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
|
||||
return ifs.Name, ifs.HardwareAddr
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
118
vendor/github.com/google/uuid/null.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
// Copyright 2021 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var jsonNull = []byte("null")
|
||||
|
||||
// NullUUID represents a UUID that may be null.
|
||||
// NullUUID implements the SQL driver.Scanner interface so
|
||||
// it can be used as a scan destination:
|
||||
//
|
||||
// var u uuid.NullUUID
|
||||
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
|
||||
// ...
|
||||
// if u.Valid {
|
||||
// // use u.UUID
|
||||
// } else {
|
||||
// // NULL value
|
||||
// }
|
||||
//
|
||||
type NullUUID struct {
|
||||
UUID UUID
|
||||
Valid bool // Valid is true if UUID is not NULL
|
||||
}
|
||||
|
||||
// Scan implements the SQL driver.Scanner interface.
|
||||
func (nu *NullUUID) Scan(value interface{}) error {
|
||||
if value == nil {
|
||||
nu.UUID, nu.Valid = Nil, false
|
||||
return nil
|
||||
}
|
||||
|
||||
err := nu.UUID.Scan(value)
|
||||
if err != nil {
|
||||
nu.Valid = false
|
||||
return err
|
||||
}
|
||||
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver Valuer interface.
|
||||
func (nu NullUUID) Value() (driver.Value, error) {
|
||||
if !nu.Valid {
|
||||
return nil, nil
|
||||
}
|
||||
// Delegate to UUID Value function
|
||||
return nu.UUID.Value()
|
||||
}
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler.
|
||||
func (nu NullUUID) MarshalBinary() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return nu.UUID[:], nil
|
||||
}
|
||||
|
||||
return []byte(nil), nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
||||
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
|
||||
if len(data) != 16 {
|
||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
|
||||
}
|
||||
copy(nu.UUID[:], data)
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (nu NullUUID) MarshalText() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return nu.UUID.MarshalText()
|
||||
}
|
||||
|
||||
return jsonNull, nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (nu *NullUUID) UnmarshalText(data []byte) error {
|
||||
id, err := ParseBytes(data)
|
||||
if err != nil {
|
||||
nu.Valid = false
|
||||
return err
|
||||
}
|
||||
nu.UUID = id
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (nu NullUUID) MarshalJSON() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return json.Marshal(nu.UUID)
|
||||
}
|
||||
|
||||
return jsonNull, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
|
||||
if bytes.Equal(data, jsonNull) {
|
||||
*nu = NullUUID{}
|
||||
return nil // valid null UUID
|
||||
}
|
||||
err := json.Unmarshal(data, &nu.UUID)
|
||||
nu.Valid = err == nil
|
||||
return err
|
||||
}
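// exampleNullUUIDJSON is an illustrative sketch added by the editor; it is not
// part of the vendored file. A JSON null decodes into an invalid NullUUID, while
// a quoted UUID string decodes into a valid one.
func exampleNullUUIDJSON() (bool, error) {
	var nu NullUUID
	if err := nu.UnmarshalJSON([]byte(`null`)); err != nil {
		return false, err
	}
	// nu.Valid is false here; the zero UUID stands in for the NULL value.
	return nu.Valid, nil
}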
|
||||
59
vendor/github.com/google/uuid/sql.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
|
||||
// Currently, database types that map to string and []byte are supported. Please
|
||||
// consult database-specific driver documentation for matching types.
|
||||
func (uuid *UUID) Scan(src interface{}) error {
|
||||
switch src := src.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
|
||||
case string:
|
||||
// if an empty UUID comes from a table, we return a null UUID
|
||||
if src == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// see Parse for required string format
|
||||
u, err := Parse(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Scan: %v", err)
|
||||
}
|
||||
|
||||
*uuid = u
|
||||
|
||||
case []byte:
|
||||
// if an empty UUID comes from a table, we return a null UUID
|
||||
if len(src) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// assumes a simple slice of bytes if 16 bytes
|
||||
// otherwise attempts to parse
|
||||
if len(src) != 16 {
|
||||
return uuid.Scan(string(src))
|
||||
}
|
||||
copy((*uuid)[:], src)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements sql.Valuer so that UUIDs can be written to databases
|
||||
// transparently. Currently, UUIDs map to strings. Please consult
|
||||
// database-specific driver documentation for matching types.
|
||||
func (uuid UUID) Value() (driver.Value, error) {
|
||||
return uuid.String(), nil
|
||||
}
|
||||
123
vendor/github.com/google/uuid/time.go
generated
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Time represents a time as the number of 100s of nanoseconds since 15 Oct
|
||||
// 1582.
|
||||
type Time int64
|
||||
|
||||
const (
|
||||
lillian = 2299160 // Julian day of 15 Oct 1582
|
||||
unix = 2440587 // Julian day of 1 Jan 1970
|
||||
epoch = unix - lillian // Days between epochs
|
||||
g1582 = epoch * 86400 // seconds between epochs
|
||||
g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
|
||||
)
|
||||
|
||||
var (
|
||||
timeMu sync.Mutex
|
||||
lasttime uint64 // last time we returned
|
||||
clockSeq uint16 // clock sequence for this run
|
||||
|
||||
timeNow = time.Now // for testing
|
||||
)
|
||||
|
||||
// UnixTime converts t to the number of seconds and nanoseconds since the Unix
|
||||
// epoch of 1 Jan 1970.
|
||||
func (t Time) UnixTime() (sec, nsec int64) {
|
||||
sec = int64(t - g1582ns100)
|
||||
nsec = (sec % 10000000) * 100
|
||||
sec /= 10000000
|
||||
return sec, nsec
|
||||
}
|
||||
|
||||
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
|
||||
// clock sequence as well as adjusting the clock sequence as needed. An error
|
||||
// is returned if the current time cannot be determined.
|
||||
func GetTime() (Time, uint16, error) {
|
||||
defer timeMu.Unlock()
|
||||
timeMu.Lock()
|
||||
return getTime()
|
||||
}
|
||||
|
||||
func getTime() (Time, uint16, error) {
|
||||
t := timeNow()
|
||||
|
||||
// If we don't have a clock sequence already, set one.
|
||||
if clockSeq == 0 {
|
||||
setClockSequence(-1)
|
||||
}
|
||||
now := uint64(t.UnixNano()/100) + g1582ns100
|
||||
|
||||
// If time has gone backwards with this clock sequence then we
|
||||
// increment the clock sequence
|
||||
if now <= lasttime {
|
||||
clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
|
||||
}
|
||||
lasttime = now
|
||||
return Time(now), clockSeq, nil
|
||||
}
|
||||
|
||||
// ClockSequence returns the current clock sequence, generating one if not
|
||||
// already set. The clock sequence is only used for Version 1 UUIDs.
|
||||
//
|
||||
// The uuid package does not use global static storage for the clock sequence or
|
||||
// the last time a UUID was generated. Unless SetClockSequence is used, a new
|
||||
// random clock sequence is generated the first time a clock sequence is
|
||||
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
|
||||
func ClockSequence() int {
|
||||
defer timeMu.Unlock()
|
||||
timeMu.Lock()
|
||||
return clockSequence()
|
||||
}
|
||||
|
||||
func clockSequence() int {
|
||||
if clockSeq == 0 {
|
||||
setClockSequence(-1)
|
||||
}
|
||||
return int(clockSeq & 0x3fff)
|
||||
}
|
||||
|
||||
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
|
||||
// -1 causes a new sequence to be generated.
|
||||
func SetClockSequence(seq int) {
|
||||
defer timeMu.Unlock()
|
||||
timeMu.Lock()
|
||||
setClockSequence(seq)
|
||||
}
|
||||
|
||||
func setClockSequence(seq int) {
|
||||
if seq == -1 {
|
||||
var b [2]byte
|
||||
randomBits(b[:]) // clock sequence
|
||||
seq = int(b[0])<<8 | int(b[1])
|
||||
}
|
||||
oldSeq := clockSeq
|
||||
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
|
||||
if oldSeq != clockSeq {
|
||||
lasttime = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
|
||||
// uuid. The time is only defined for version 1 and 2 UUIDs.
|
||||
func (uuid UUID) Time() Time {
|
||||
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
|
||||
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
|
||||
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
|
||||
return Time(time)
|
||||
}
|
||||
|
||||
// ClockSequence returns the clock sequence encoded in uuid.
|
||||
// The clock sequence is only well defined for version 1 and 2 UUIDs.
|
||||
func (uuid UUID) ClockSequence() int {
|
||||
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
|
||||
}
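// exampleUUIDTime is an illustrative sketch added by the editor; it is not part
// of the vendored file. It recovers the timestamp embedded in a Version 1 UUID
// as a time.Time via UnixTime.
func exampleUUIDTime(u UUID) time.Time {
	sec, nsec := u.Time().UnixTime()
	return time.Unix(sec, nsec)
}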
|
||||
43
vendor/github.com/google/uuid/util.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// randomBits completely fills slice b with random data.
|
||||
func randomBits(b []byte) {
|
||||
if _, err := io.ReadFull(rander, b); err != nil {
|
||||
panic(err.Error()) // rand should never fail
|
||||
}
|
||||
}
|
||||
|
||||
// xvalues returns the value of a byte as a hexadecimal digit or 255.
|
||||
var xvalues = [256]byte{
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
}
|
||||
|
||||
// xtob converts hex characters x1 and x2 into a byte.
|
||||
func xtob(x1, x2 byte) (byte, bool) {
|
||||
b1 := xvalues[x1]
|
||||
b2 := xvalues[x2]
|
||||
return (b1 << 4) | b2, b1 != 255 && b2 != 255
|
||||
}
|
||||
296
vendor/github.com/google/uuid/uuid.go
generated
vendored
Normal file
@@ -0,0 +1,296 @@
|
||||
// Copyright 2018 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
|
||||
// 4122.
|
||||
type UUID [16]byte
|
||||
|
||||
// A Version represents a UUID's version.
|
||||
type Version byte
|
||||
|
||||
// A Variant represents a UUID's variant.
|
||||
type Variant byte
|
||||
|
||||
// Constants returned by Variant.
|
||||
const (
|
||||
Invalid = Variant(iota) // Invalid UUID
|
||||
RFC4122 // The variant specified in RFC4122
|
||||
Reserved // Reserved, NCS backward compatibility.
|
||||
Microsoft // Reserved, Microsoft Corporation backward compatibility.
|
||||
Future // Reserved for future definition.
|
||||
)
|
||||
|
||||
const randPoolSize = 16 * 16
|
||||
|
||||
var (
|
||||
rander = rand.Reader // random function
|
||||
poolEnabled = false
|
||||
poolMu sync.Mutex
|
||||
poolPos = randPoolSize // protected with poolMu
|
||||
pool [randPoolSize]byte // protected with poolMu
|
||||
)
|
||||
|
||||
type invalidLengthError struct{ len int }
|
||||
|
||||
func (err invalidLengthError) Error() string {
|
||||
return fmt.Sprintf("invalid UUID length: %d", err.len)
|
||||
}
|
||||
|
||||
// IsInvalidLengthError is a matcher function for the custom error invalidLengthError.
|
||||
func IsInvalidLengthError(err error) bool {
|
||||
_, ok := err.(invalidLengthError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Parse decodes s into a UUID or returns an error. Both the standard UUID
|
||||
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
|
||||
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
|
||||
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
|
||||
func Parse(s string) (UUID, error) {
|
||||
var uuid UUID
|
||||
switch len(s) {
|
||||
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36:
|
||||
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36 + 9:
|
||||
if !strings.EqualFold(s[:9], "urn:uuid:") {
|
||||
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
|
||||
}
|
||||
s = s[9:]
|
||||
|
||||
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
|
||||
case 36 + 2:
|
||||
s = s[1:]
|
||||
|
||||
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
case 32:
|
||||
var ok bool
|
||||
for i := range uuid {
|
||||
uuid[i], ok = xtob(s[i*2], s[i*2+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
}
|
||||
return uuid, nil
|
||||
default:
|
||||
return uuid, invalidLengthError{len(s)}
|
||||
}
|
||||
// s is now at least 36 bytes long
|
||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
for i, x := range [16]int{
|
||||
0, 2, 4, 6,
|
||||
9, 11,
|
||||
14, 16,
|
||||
19, 21,
|
||||
24, 26, 28, 30, 32, 34,
|
||||
} {
|
||||
v, ok := xtob(s[x], s[x+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
uuid[i] = v
|
||||
}
|
||||
return uuid, nil
|
||||
}
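// exampleParseForms is an illustrative sketch added by the editor; it is not
// part of the vendored file. All four accepted encodings of the same UUID parse
// to the same value.
func exampleParseForms() error {
	forms := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
		"6ba7b8109dad11d180b400c04fd430c8",
	}
	for _, s := range forms {
		if _, err := Parse(s); err != nil {
			return err
		}
	}
	return nil
}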
|
||||
|
||||
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
|
||||
func ParseBytes(b []byte) (UUID, error) {
|
||||
var uuid UUID
|
||||
switch len(b) {
|
||||
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
|
||||
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
|
||||
}
|
||||
b = b[9:]
|
||||
case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
|
||||
b = b[1:]
|
||||
case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
var ok bool
|
||||
for i := 0; i < 32; i += 2 {
|
||||
uuid[i/2], ok = xtob(b[i], b[i+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
}
|
||||
return uuid, nil
|
||||
default:
|
||||
return uuid, invalidLengthError{len(b)}
|
||||
}
|
||||
// b is now at least 36 bytes long
|
||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
for i, x := range [16]int{
|
||||
0, 2, 4, 6,
|
||||
9, 11,
|
||||
14, 16,
|
||||
19, 21,
|
||||
24, 26, 28, 30, 32, 34,
|
||||
} {
|
||||
v, ok := xtob(b[x], b[x+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
uuid[i] = v
|
||||
}
|
||||
return uuid, nil
|
||||
}
|
||||
|
||||
// MustParse is like Parse but panics if the string cannot be parsed.
|
||||
// It simplifies safe initialization of global variables holding compiled UUIDs.
|
||||
func MustParse(s string) UUID {
|
||||
uuid, err := Parse(s)
|
||||
if err != nil {
|
||||
panic(`uuid: Parse(` + s + `): ` + err.Error())
|
||||
}
|
||||
return uuid
|
||||
}
|
||||
|
||||
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
|
||||
// does not have a length of 16. The bytes are copied from the slice.
|
||||
func FromBytes(b []byte) (uuid UUID, err error) {
|
||||
err = uuid.UnmarshalBinary(b)
|
||||
return uuid, err
|
||||
}
|
||||
|
||||
// Must returns uuid if err is nil and panics otherwise.
|
||||
func Must(uuid UUID, err error) UUID {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return uuid
|
||||
}
|
||||
|
||||
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
|
||||
// , or "" if uuid is invalid.
|
||||
func (uuid UUID) String() string {
|
||||
var buf [36]byte
|
||||
encodeHex(buf[:], uuid)
|
||||
return string(buf[:])
|
||||
}
|
||||
|
||||
// URN returns the RFC 2141 URN form of uuid,
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
|
||||
func (uuid UUID) URN() string {
|
||||
var buf [36 + 9]byte
|
||||
copy(buf[:], "urn:uuid:")
|
||||
encodeHex(buf[9:], uuid)
|
||||
return string(buf[:])
|
||||
}
|
||||
|
||||
func encodeHex(dst []byte, uuid UUID) {
|
||||
hex.Encode(dst, uuid[:4])
|
||||
dst[8] = '-'
|
||||
hex.Encode(dst[9:13], uuid[4:6])
|
||||
dst[13] = '-'
|
||||
hex.Encode(dst[14:18], uuid[6:8])
|
||||
dst[18] = '-'
|
||||
hex.Encode(dst[19:23], uuid[8:10])
|
||||
dst[23] = '-'
|
||||
hex.Encode(dst[24:], uuid[10:])
|
||||
}
|
||||
|
||||
// Variant returns the variant encoded in uuid.
|
||||
func (uuid UUID) Variant() Variant {
|
||||
switch {
|
||||
case (uuid[8] & 0xc0) == 0x80:
|
||||
return RFC4122
|
||||
case (uuid[8] & 0xe0) == 0xc0:
|
||||
return Microsoft
|
||||
case (uuid[8] & 0xe0) == 0xe0:
|
||||
return Future
|
||||
default:
|
||||
return Reserved
|
||||
}
|
||||
}
|
||||
|
||||
// Version returns the version of uuid.
|
||||
func (uuid UUID) Version() Version {
|
||||
return Version(uuid[6] >> 4)
|
||||
}
|
||||
|
||||
func (v Version) String() string {
|
||||
if v > 15 {
|
||||
return fmt.Sprintf("BAD_VERSION_%d", v)
|
||||
}
|
||||
return fmt.Sprintf("VERSION_%d", v)
|
||||
}
|
||||
|
||||
func (v Variant) String() string {
|
||||
switch v {
|
||||
case RFC4122:
|
||||
return "RFC4122"
|
||||
case Reserved:
|
||||
return "Reserved"
|
||||
case Microsoft:
|
||||
return "Microsoft"
|
||||
case Future:
|
||||
return "Future"
|
||||
case Invalid:
|
||||
return "Invalid"
|
||||
}
|
||||
return fmt.Sprintf("BadVariant%d", int(v))
|
||||
}
|
||||
|
||||
// SetRand sets the random number generator to r, which implements io.Reader.
|
||||
// If r.Read returns an error when the package requests random data then
|
||||
// a panic will be issued.
|
||||
//
|
||||
// Calling SetRand with nil sets the random number generator to the default
|
||||
// generator.
|
||||
func SetRand(r io.Reader) {
|
||||
if r == nil {
|
||||
rander = rand.Reader
|
||||
return
|
||||
}
|
||||
rander = r
|
||||
}
|
||||
|
||||
// EnableRandPool enables internal randomness pool used for Random
|
||||
// (Version 4) UUID generation. The pool contains random bytes read from
|
||||
// the random number generator on demand in batches. Enabling the pool
|
||||
// may improve the UUID generation throughput significantly.
|
||||
//
|
||||
// Since the pool is stored on the Go heap, this feature may be a bad fit
|
||||
// for security sensitive applications.
|
||||
//
|
||||
// Both EnableRandPool and DisableRandPool are not thread-safe and should
|
||||
// only be called when there is no possibility that New or any other
|
||||
// UUID Version 4 generation function will be called concurrently.
|
||||
func EnableRandPool() {
|
||||
poolEnabled = true
|
||||
}
|
||||
|
||||
// DisableRandPool disables the randomness pool if it was previously
|
||||
// enabled with EnableRandPool.
|
||||
//
|
||||
// Both EnableRandPool and DisableRandPool are not thread-safe and should
|
||||
// only be called when there is no possibility that New or any other
|
||||
// UUID Version 4 generation function will be called concurrently.
|
||||
func DisableRandPool() {
|
||||
poolEnabled = false
|
||||
defer poolMu.Unlock()
|
||||
poolMu.Lock()
|
||||
poolPos = randPoolSize
|
||||
}
|
||||
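// Illustrative sketch (not part of the vendored file): the pool trades a
// heap-resident buffer of random bytes for fewer crypto/rand reads, so it is
// typically enabled once at startup of throughput-sensitive, non-security-
// critical code, before any concurrent Version 4 generation begins.
//
//	func init() {
//		uuid.EnableRandPool()
//	}
//	...
//	id := uuid.New() // now served from the pooled random bytes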
44
vendor/github.com/google/uuid/version1.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"encoding/binary"
)

// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns nil. If clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewUUID returns nil and an error.
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
	var uuid UUID
	now, seq, err := GetTime()
	if err != nil {
		return uuid, err
	}

	timeLow := uint32(now & 0xffffffff)
	timeMid := uint16((now >> 32) & 0xffff)
	timeHi := uint16((now >> 48) & 0x0fff)
	timeHi |= 0x1000 // Version 1

	binary.BigEndian.PutUint32(uuid[0:], timeLow)
	binary.BigEndian.PutUint16(uuid[4:], timeMid)
	binary.BigEndian.PutUint16(uuid[6:], timeHi)
	binary.BigEndian.PutUint16(uuid[8:], seq)

	nodeMu.Lock()
	if nodeID == zeroID {
		setNodeInterface("")
	}
	copy(uuid[10:], nodeID[:])
	nodeMu.Unlock()

	return uuid, nil
}
|
||||
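// Illustrative sketch (not part of the vendored file): a Version 1 UUID
// embeds the 60-bit timestamp, the 14-bit clock sequence and the 48-bit node
// ID laid out above, so two calls in a row on the same machine differ only in
// the time (and possibly clock-sequence) fields.
//
//	id, err := uuid.NewUUID()
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(id.Version()) // VERSION_1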
76
vendor/github.com/google/uuid/version4.go
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import "io"

// New creates a new random UUID or panics. New is equivalent to
// the expression
//
//	uuid.Must(uuid.NewRandom())
func New() UUID {
	return Must(NewRandom())
}

// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
//	uuid.New().String()
func NewString() string {
	return Must(NewRandom()).String()
}

// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// Uses the randomness pool if it was enabled with EnableRandPool.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
//	Randomly generated UUIDs have 122 random bits. One's annual risk of being
//	hit by a meteorite is estimated to be one chance in 17 billion, that
//	means the probability is about 0.00000000006 (6 × 10⁻¹¹),
//	equivalent to the odds of creating a few tens of trillions of UUIDs in a
//	year and having one duplicate.
func NewRandom() (UUID, error) {
	if !poolEnabled {
		return NewRandomFromReader(rander)
	}
	return newRandomFromPool()
}

// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
func NewRandomFromReader(r io.Reader) (UUID, error) {
	var uuid UUID
	_, err := io.ReadFull(r, uuid[:])
	if err != nil {
		return Nil, err
	}
	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}

func newRandomFromPool() (UUID, error) {
	var uuid UUID
	poolMu.Lock()
	if poolPos == randPoolSize {
		_, err := io.ReadFull(rander, pool[:])
		if err != nil {
			poolMu.Unlock()
			return Nil, err
		}
		poolPos = 0
	}
	copy(uuid[:], pool[poolPos:(poolPos+16)])
	poolPos += 16
	poolMu.Unlock()

	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}
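// Illustrative sketch (not part of the vendored file): NewRandomFromReader
// accepts any io.Reader, which makes Version 4 generation reproducible in
// tests by feeding it a fixed byte stream; only the version and variant bits
// are overwritten.
//
//	seed := bytes.NewReader(make([]byte, 16)) // 16 zero bytes
//	id, err := uuid.NewRandomFromReader(seed)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(id) // 00000000-0000-4000-8000-000000000000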
|
||||
22
vendor/github.com/jarcoal/httpmock/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
20
vendor/github.com/jarcoal/httpmock/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Jared Morse
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
257
vendor/github.com/jarcoal/httpmock/README.md
generated
vendored
Normal file
@@ -0,0 +1,257 @@
|
||||
# httpmock [](https://github.com/jarcoal/httpmock/actions?query=workflow%3ABuild) [](https://coveralls.io/github/jarcoal/httpmock?branch=v1) [](https://godoc.org/github.com/jarcoal/httpmock) [](https://github.com/jarcoal/httpmock/releases) [](https://github.com/avelino/awesome-go/#testing)
|
||||
|
||||
Easy mocking of http responses from external resources.
|
||||
|
||||
## Install
|
||||
|
||||
Currently supports Go 1.13 to 1.21 and is regularly tested against tip.
|
||||
|
||||
`v1` branch has to be used instead of `master`.
|
||||
|
||||
In your go files, simply use:
|
||||
```go
|
||||
import "github.com/jarcoal/httpmock"
|
||||
```
|
||||
|
||||
The next `go mod tidy` or `go test` invocation will automatically
|
||||
populate your `go.mod` with the latest httpmock release, now
|
||||
[](https://github.com/jarcoal/httpmock/releases).
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
### Simple Example:
|
||||
```go
|
||||
func TestFetchArticles(t *testing.T) {
|
||||
httpmock.Activate()
|
||||
defer httpmock.DeactivateAndReset()
|
||||
|
||||
// Exact URL match
|
||||
httpmock.RegisterResponder("GET", "https://api.mybiz.com/articles",
|
||||
httpmock.NewStringResponder(200, `[{"id": 1, "name": "My Great Article"}]`))
|
||||
|
||||
// Regexp match (could use httpmock.RegisterRegexpResponder instead)
|
||||
httpmock.RegisterResponder("GET", `=~^https://api\.mybiz\.com/articles/id/\d+\z`,
|
||||
httpmock.NewStringResponder(200, `{"id": 1, "name": "My Great Article"}`))
|
||||
|
||||
// do stuff that makes a request to articles
|
||||
...
|
||||
|
||||
// get count info
|
||||
httpmock.GetTotalCallCount()
|
||||
|
||||
// get the amount of calls for the registered responder
|
||||
info := httpmock.GetCallCountInfo()
|
||||
info["GET https://api.mybiz.com/articles"] // number of GET calls made to https://api.mybiz.com/articles
|
||||
info["GET https://api.mybiz.com/articles/id/12"] // number of GET calls made to https://api.mybiz.com/articles/id/12
|
||||
info[`GET =~^https://api\.mybiz\.com/articles/id/\d+\z`] // number of GET calls made to https://api.mybiz.com/articles/id/<any-number>
|
||||
}
|
||||
```
|
||||
|
||||
### Advanced Example:
|
||||
```go
|
||||
func TestFetchArticles(t *testing.T) {
|
||||
httpmock.Activate()
|
||||
defer httpmock.DeactivateAndReset()
|
||||
|
||||
// our database of articles
|
||||
articles := make([]map[string]interface{}, 0)
|
||||
|
||||
// mock to list out the articles
|
||||
httpmock.RegisterResponder("GET", "https://api.mybiz.com/articles",
|
||||
func(req *http.Request) (*http.Response, error) {
|
||||
resp, err := httpmock.NewJsonResponse(200, articles)
|
||||
if err != nil {
|
||||
return httpmock.NewStringResponse(500, ""), nil
|
||||
}
|
||||
return resp, nil
|
||||
})
|
||||
|
||||
// return an article related to the request with the help of regexp submatch (\d+)
|
||||
httpmock.RegisterResponder("GET", `=~^https://api\.mybiz\.com/articles/id/(\d+)\z`,
|
||||
func(req *http.Request) (*http.Response, error) {
|
||||
// Get ID from request
|
||||
id := httpmock.MustGetSubmatchAsUint(req, 1) // 1=first regexp submatch
|
||||
return httpmock.NewJsonResponse(200, map[string]interface{}{
|
||||
"id": id,
|
||||
"name": "My Great Article",
|
||||
})
|
||||
})
|
||||
|
||||
// mock to add a new article
|
||||
httpmock.RegisterResponder("POST", "https://api.mybiz.com/articles",
|
||||
func(req *http.Request) (*http.Response, error) {
|
||||
article := make(map[string]interface{})
|
||||
if err := json.NewDecoder(req.Body).Decode(&article); err != nil {
|
||||
return httpmock.NewStringResponse(400, ""), nil
|
||||
}
|
||||
|
||||
articles = append(articles, article)
|
||||
|
||||
resp, err := httpmock.NewJsonResponse(200, article)
|
||||
if err != nil {
|
||||
return httpmock.NewStringResponse(500, ""), nil
|
||||
}
|
||||
return resp, nil
|
||||
})
|
||||
|
||||
// mock to add a specific article, send a Bad Request response
|
||||
// when the request body contains `"type":"toy"`
|
||||
httpmock.RegisterMatcherResponder("POST", "https://api.mybiz.com/articles",
|
||||
httpmock.BodyContainsString(`"type":"toy"`),
|
||||
httpmock.NewStringResponder(400, `{"reason":"Invalid article type"}`))
|
||||
|
||||
// do stuff that adds and checks articles
|
||||
}
|
||||
```
|
||||
|
||||
### Algorithm
|
||||
|
||||
When a `GET http://example.tld/some/path?b=12&a=foo&a=bar` request is
caught, all standard responders are checked against the following URLs
or paths; the first match stops the search:
|
||||
|
||||
1. `http://example.tld/some/path?b=12&a=foo&a=bar` (original URL)
|
||||
1. `http://example.tld/some/path?a=bar&a=foo&b=12` (sorted query params)
|
||||
1. `http://example.tld/some/path` (without query params)
|
||||
1. `/some/path?b=12&a=foo&a=bar` (original URL without scheme and host)
|
||||
1. `/some/path?a=bar&a=foo&b=12` (same, but sorted query params)
|
||||
1. `/some/path` (path only)
|
||||
|
||||
If no standard responder matched, the regexp responders are checked,
|
||||
in the same order, the first match stops the search.
|
||||
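For instance (a minimal sketch based on the rules above; the URLs are placeholders), a responder registered on the bare path also serves requests carrying query parameters, because step 3 drops them before matching:

```go
httpmock.RegisterResponder("GET", "http://example.tld/some/path",
	httpmock.NewStringResponder(200, `{"ok":true}`))

// With httpmock activated, both requests below hit the responder above:
// the second matches the original URL directly (step 1), the first matches
// once its query parameters are dropped (step 3).
http.Get("http://example.tld/some/path?b=12&a=foo&a=bar")
http.Get("http://example.tld/some/path")
```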
|
||||
|
||||
### [go-testdeep](https://go-testdeep.zetta.rocks/) + [tdsuite](https://pkg.go.dev/github.com/maxatome/go-testdeep/helpers/tdsuite) example:
|
||||
```go
|
||||
// article_test.go
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/jarcoal/httpmock"
|
||||
"github.com/maxatome/go-testdeep/helpers/tdsuite"
|
||||
"github.com/maxatome/go-testdeep/td"
|
||||
)
|
||||
|
||||
type MySuite struct{}
|
||||
|
||||
func (s *MySuite) Setup(t *td.T) error {
|
||||
// block all HTTP requests
|
||||
httpmock.Activate()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *MySuite) PostTest(t *td.T, testName string) error {
|
||||
// remove any mocks after each test
|
||||
httpmock.Reset()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *MySuite) Destroy(t *td.T) error {
|
||||
httpmock.DeactivateAndReset()
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestMySuite(t *testing.T) {
|
||||
tdsuite.Run(t, &MySuite{})
|
||||
}
|
||||
|
||||
func (s *MySuite) TestArticles(assert, require *td.T) {
|
||||
httpmock.RegisterResponder("GET", "https://api.mybiz.com/articles.json",
|
||||
httpmock.NewStringResponder(200, `[{"id": 1, "name": "My Great Article"}]`))
|
||||
|
||||
// do stuff that makes a request to articles.json
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
### [Ginkgo](https://onsi.github.io/ginkgo/) example:
|
||||
```go
|
||||
// article_suite_test.go
|
||||
|
||||
import (
|
||||
// ...
|
||||
"github.com/jarcoal/httpmock"
|
||||
)
|
||||
// ...
|
||||
var _ = BeforeSuite(func() {
|
||||
// block all HTTP requests
|
||||
httpmock.Activate()
|
||||
})
|
||||
|
||||
var _ = BeforeEach(func() {
|
||||
// remove any mocks
|
||||
httpmock.Reset()
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
httpmock.DeactivateAndReset()
|
||||
})
|
||||
|
||||
|
||||
// article_test.go
|
||||
|
||||
import (
|
||||
// ...
|
||||
"github.com/jarcoal/httpmock"
|
||||
)
|
||||
|
||||
var _ = Describe("Articles", func() {
|
||||
It("returns a list of articles", func() {
|
||||
httpmock.RegisterResponder("GET", "https://api.mybiz.com/articles.json",
|
||||
httpmock.NewStringResponder(200, `[{"id": 1, "name": "My Great Article"}]`))
|
||||
|
||||
// do stuff that makes a request to articles.json
|
||||
})
|
||||
})
|
||||
```
|
||||
|
||||
### [Ginkgo](https://onsi.github.io/ginkgo/) + [Resty](https://github.com/go-resty/resty) Example:
|
||||
```go
|
||||
// article_suite_test.go
|
||||
|
||||
import (
|
||||
// ...
|
||||
"github.com/jarcoal/httpmock"
|
||||
"github.com/go-resty/resty"
|
||||
)
|
||||
// ...
|
||||
var _ = BeforeSuite(func() {
|
||||
// block all HTTP requests
|
||||
httpmock.ActivateNonDefault(resty.DefaultClient.GetClient())
|
||||
})
|
||||
|
||||
var _ = BeforeEach(func() {
|
||||
// remove any mocks
|
||||
httpmock.Reset()
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
httpmock.DeactivateAndReset()
|
||||
})
|
||||
|
||||
|
||||
// article_test.go
|
||||
|
||||
import (
|
||||
// ...
|
||||
"github.com/jarcoal/httpmock"
|
||||
"github.com/go-resty/resty"
|
||||
)
|
||||
|
||||
var _ = Describe("Articles", func() {
|
||||
It("returns a list of articles", func() {
|
||||
fixture := `{"status":{"message": "Your message", "code": 200}}`
|
||||
responder := httpmock.NewStringResponder(200, fixture)
|
||||
fakeUrl := "https://api.mybiz.com/articles.json"
|
||||
httpmock.RegisterResponder("GET", fakeUrl, responder)
|
||||
|
||||
// fetch the article into struct
|
||||
articleObject := &models.Article{}
|
||||
_, err := resty.R().SetResult(articleObject).Get(fakeUrl)
|
||||
|
||||
// do stuff with the article object ...
|
||||
})
|
||||
})
|
||||
```
|
||||
6
vendor/github.com/jarcoal/httpmock/any.go
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
//go:build !go1.18
// +build !go1.18

package httpmock

type any = interface{}
|
||||
83
vendor/github.com/jarcoal/httpmock/doc.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
/*
|
||||
Package httpmock provides tools for mocking HTTP responses.
|
||||
|
||||
Simple Example:
|
||||
|
||||
func TestFetchArticles(t *testing.T) {
|
||||
httpmock.Activate()
|
||||
defer httpmock.DeactivateAndReset()
|
||||
|
||||
// Exact URL match
|
||||
httpmock.RegisterResponder("GET", "https://api.mybiz.com/articles",
|
||||
httpmock.NewStringResponder(200, `[{"id": 1, "name": "My Great Article"}]`))
|
||||
|
||||
// Regexp match (could use httpmock.RegisterRegexpResponder instead)
|
||||
httpmock.RegisterResponder("GET", `=~^https://api\.mybiz\.com/articles/id/\d+\z`,
|
||||
httpmock.NewStringResponder(200, `{"id": 1, "name": "My Great Article"}`))
|
||||
|
||||
// do stuff that makes a request to articles
|
||||
|
||||
// get count info
|
||||
httpmock.GetTotalCallCount()
|
||||
|
||||
// get the amount of calls for the registered responder
|
||||
info := httpmock.GetCallCountInfo()
|
||||
info["GET https://api.mybiz.com/articles"] // number of GET calls made to https://api.mybiz.com/articles
|
||||
info["GET https://api.mybiz.com/articles/id/12"] // number of GET calls made to https://api.mybiz.com/articles/id/12
|
||||
info[`GET =~^https://api\.mybiz\.com/articles/id/\d+\z`] // number of GET calls made to https://api.mybiz.com/articles/id/<any-number>
|
||||
}
|
||||
|
||||
Advanced Example:
|
||||
|
||||
func TestFetchArticles(t *testing.T) {
|
||||
httpmock.Activate()
|
||||
defer httpmock.DeactivateAndReset()
|
||||
|
||||
// our database of articles
|
||||
articles := make([]map[string]any, 0)
|
||||
|
||||
// mock to list out the articles
|
||||
httpmock.RegisterResponder("GET", "https://api.mybiz.com/articles",
|
||||
func(req *http.Request) (*http.Response, error) {
|
||||
resp, err := httpmock.NewJsonResponse(200, articles)
|
||||
if err != nil {
|
||||
return httpmock.NewStringResponse(500, ""), nil
|
||||
}
|
||||
return resp, nil
|
||||
},
|
||||
)
|
||||
|
||||
// return an article related to the request with the help of regexp submatch (\d+)
|
||||
httpmock.RegisterResponder("GET", `=~^https://api\.mybiz\.com/articles/id/(\d+)\z`,
|
||||
func(req *http.Request) (*http.Response, error) {
|
||||
// Get ID from request
|
||||
id := httpmock.MustGetSubmatchAsUint(req, 1) // 1=first regexp submatch
|
||||
return httpmock.NewJsonResponse(200, map[string]any{
|
||||
"id": id,
|
||||
"name": "My Great Article",
|
||||
})
|
||||
},
|
||||
)
|
||||
|
||||
// mock to add a new article
|
||||
httpmock.RegisterResponder("POST", "https://api.mybiz.com/articles",
|
||||
func(req *http.Request) (*http.Response, error) {
|
||||
article := make(map[string]any)
|
||||
if err := json.NewDecoder(req.Body).Decode(&article); err != nil {
|
||||
return httpmock.NewStringResponse(400, ""), nil
|
||||
}
|
||||
|
||||
articles = append(articles, article)
|
||||
|
||||
resp, err := httpmock.NewJsonResponse(200, article)
|
||||
if err != nil {
|
||||
return httpmock.NewStringResponse(500, ""), nil
|
||||
}
|
||||
return resp, nil
|
||||
},
|
||||
)
|
||||
|
||||
// do stuff that adds and checks articles
|
||||
}
|
||||
*/
|
||||
package httpmock
|
||||
13
vendor/github.com/jarcoal/httpmock/env.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
package httpmock

import (
	"os"
)

var envVarName = "GONOMOCKS"

// Disabled reports whether httpmock is disabled, which is the case when
// the GONOMOCKS environment variable is set to a non-empty value.
func Disabled() bool {
	return os.Getenv(envVarName) != ""
}
|
||||
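// Illustrative sketch (not part of the vendored file): a test can honour
// GONOMOCKS explicitly when registering mocks would be misleading while the
// package is disabled:
//
//	if httpmock.Disabled() {
//		t.Skip("GONOMOCKS is set, running against real endpoints")
//	}
//	httpmock.Activate()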
63
vendor/github.com/jarcoal/httpmock/file.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
package httpmock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil" //nolint: staticcheck
|
||||
)
|
||||
|
||||
// File is a file name. The contents of this file are loaded on demand
|
||||
// by the following methods.
|
||||
//
|
||||
// Note that:
|
||||
//
|
||||
// file := httpmock.File("file.txt")
|
||||
// fmt.Printf("file: %s\n", file)
|
||||
//
|
||||
// prints the content of file "file.txt" as [File.String] method is used.
|
||||
//
|
||||
// To print the file name, and not its content, simply do:
|
||||
//
|
||||
// file := httpmock.File("file.txt")
|
||||
// fmt.Printf("file: %s\n", string(file))
|
||||
type File string
|
||||
|
||||
// MarshalJSON implements [encoding/json.Marshaler].
|
||||
//
|
||||
// Useful to be used in conjunction with [NewJsonResponse] or
|
||||
// [NewJsonResponder] as in:
|
||||
//
|
||||
// httpmock.NewJsonResponder(200, httpmock.File("body.json"))
|
||||
func (f File) MarshalJSON() ([]byte, error) {
|
||||
return f.bytes()
|
||||
}
|
||||
|
||||
func (f File) bytes() ([]byte, error) {
|
||||
return ioutil.ReadFile(string(f))
|
||||
}
|
||||
|
||||
// Bytes returns the content of file as a []byte. If an error occurs
|
||||
// during the opening or reading of the file, it panics.
|
||||
//
|
||||
// Useful to be used in conjunction with [NewBytesResponse] or
|
||||
// [NewBytesResponder] as in:
|
||||
//
|
||||
// httpmock.NewBytesResponder(200, httpmock.File("body.raw").Bytes())
|
||||
func (f File) Bytes() []byte {
|
||||
b, err := f.bytes()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Cannot read %s: %s", string(f), err))
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// String implements [fmt.Stringer] and returns the content of file as
|
||||
// a string. If an error occurs during the opening or reading of the
|
||||
// file, it panics.
|
||||
//
|
||||
// Useful to be used in conjunction with [NewStringResponse] or
|
||||
// [NewStringResponder] as in:
|
||||
//
|
||||
// httpmock.NewStringResponder(200, httpmock.File("body.txt").String())
|
||||
func (f File) String() string {
|
||||
return string(f.Bytes())
|
||||
}
|
||||
41
vendor/github.com/jarcoal/httpmock/internal/error.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// NoResponderFound is returned when no responders are found for a
|
||||
// given HTTP method and URL.
|
||||
var NoResponderFound = errors.New("no responder found") // nolint: revive
|
||||
|
||||
// ErrorNoResponderFoundMistake encapsulates a NoResponderFound
|
||||
// error probably due to a user error on the method or URL path.
|
||||
type ErrorNoResponderFoundMistake struct {
|
||||
Kind string // "method", "URL" or "matcher"
|
||||
Orig string // original wrong method/URL, without any matching responder
|
||||
Suggested string // suggested method/URL with a matching responder
|
||||
}
|
||||
|
||||
var _ error = (*ErrorNoResponderFoundMistake)(nil)
|
||||
|
||||
// Unwrap implements the interface needed by errors.Unwrap.
|
||||
func (e *ErrorNoResponderFoundMistake) Unwrap() error {
|
||||
return NoResponderFound
|
||||
}
|
||||
|
||||
// Error implements error interface.
|
||||
func (e *ErrorNoResponderFoundMistake) Error() string {
|
||||
if e.Kind == "matcher" {
|
||||
return fmt.Sprintf("%s despite %s",
|
||||
NoResponderFound,
|
||||
e.Suggested,
|
||||
)
|
||||
}
|
||||
return fmt.Sprintf("%[1]s for %[2]s %[3]q, but one matches %[2]s %[4]q",
|
||||
NoResponderFound,
|
||||
e.Kind,
|
||||
e.Orig,
|
||||
e.Suggested,
|
||||
)
|
||||
}
|
||||
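// Illustrative sketch (not part of the vendored file): because Unwrap returns
// NoResponderFound, callers can detect both the bare sentinel and the
// "mistake" variant with the standard errors helpers:
//
//	if errors.Is(err, internal.NoResponderFound) {
//		// no responder matched, possibly due to a method or URL typo
//	}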
15
vendor/github.com/jarcoal/httpmock/internal/route_key.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
package internal

// RouteKey identifies a registered responder by HTTP method and URL.
type RouteKey struct {
	Method string
	URL    string
}

// NoResponder is the zero RouteKey, used to record calls that no
// responder handled.
var NoResponder RouteKey

func (r RouteKey) String() string {
	if r == NoResponder {
		return "NO_RESPONDER"
	}
	return r.Method + " " + r.URL
}
|
||||
91
vendor/github.com/jarcoal/httpmock/internal/stack_tracer.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type StackTracer struct {
|
||||
CustomFn func(...interface{})
|
||||
Err error
|
||||
}
|
||||
|
||||
// Error implements error interface.
|
||||
func (s StackTracer) Error() string {
|
||||
if s.Err == nil {
|
||||
return ""
|
||||
}
|
||||
return s.Err.Error()
|
||||
}
|
||||
|
||||
// Unwrap implements the interface needed by errors.Unwrap.
|
||||
func (s StackTracer) Unwrap() error {
|
||||
return s.Err
|
||||
}
|
||||
|
||||
// CheckStackTracer checks for specific error returned by
|
||||
// NewNotFoundResponder function or Trace Responder method.
|
||||
func CheckStackTracer(req *http.Request, err error) error {
|
||||
if nf, ok := err.(StackTracer); ok {
|
||||
if nf.CustomFn != nil {
|
||||
pc := make([]uintptr, 128)
|
||||
npc := runtime.Callers(2, pc)
|
||||
pc = pc[:npc]
|
||||
|
||||
var mesg bytes.Buffer
|
||||
var netHTTPBegin, netHTTPEnd bool
|
||||
|
||||
// Start recording at first net/http call if any...
|
||||
for {
|
||||
frames := runtime.CallersFrames(pc)
|
||||
|
||||
var lastFn string
|
||||
for {
|
||||
frame, more := frames.Next()
|
||||
|
||||
if !netHTTPEnd {
|
||||
if netHTTPBegin {
|
||||
netHTTPEnd = !strings.HasPrefix(frame.Function, "net/http.")
|
||||
} else {
|
||||
netHTTPBegin = strings.HasPrefix(frame.Function, "net/http.")
|
||||
}
|
||||
}
|
||||
|
||||
if netHTTPEnd {
|
||||
if lastFn != "" {
|
||||
if mesg.Len() == 0 {
|
||||
if nf.Err != nil {
|
||||
mesg.WriteString(nf.Err.Error())
|
||||
} else {
|
||||
fmt.Fprintf(&mesg, "%s %s", req.Method, req.URL)
|
||||
}
|
||||
mesg.WriteString("\nCalled from ")
|
||||
} else {
|
||||
mesg.WriteString("\n ")
|
||||
}
|
||||
fmt.Fprintf(&mesg, "%s()\n at %s:%d", lastFn, frame.File, frame.Line)
|
||||
}
|
||||
}
|
||||
lastFn = frame.Function
|
||||
|
||||
if !more {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// At least one net/http frame found
|
||||
if mesg.Len() > 0 {
|
||||
break
|
||||
}
|
||||
netHTTPEnd = true // retry without looking at net/http frames
|
||||
}
|
||||
|
||||
nf.CustomFn(mesg.String())
|
||||
}
|
||||
err = nf.Err
|
||||
}
|
||||
return err
|
||||
}
|
||||
22
vendor/github.com/jarcoal/httpmock/internal/submatches.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
package internal

import (
	"context"
	"net/http"
)

type submatchesKeyType struct{}

var submatchesKey submatchesKeyType

// SetSubmatches records regexp submatches in the request context so
// they can be retrieved later while handling the request.
func SetSubmatches(req *http.Request, submatches []string) *http.Request {
	if len(submatches) > 0 {
		return req.WithContext(context.WithValue(req.Context(), submatchesKey, submatches))
	}
	return req
}

// GetSubmatches returns the submatches recorded by SetSubmatches, if any.
func GetSubmatches(req *http.Request) []string {
	sm, _ := req.Context().Value(submatchesKey).([]string)
	return sm
}
|
||||
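// Illustrative sketch (not part of the vendored file): these helpers back the
// public submatch API, where a regexp responder's capture groups become
// retrievable from the request:
//
//	httpmock.RegisterResponder("GET", `=~^https://api\.mybiz\.com/articles/id/(\d+)\z`,
//		func(req *http.Request) (*http.Response, error) {
//			id := httpmock.MustGetSubmatchAsUint(req, 1) // first capture group
//			return httpmock.NewJsonResponse(200, map[string]any{"id": id})
//		})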
519
vendor/github.com/jarcoal/httpmock/match.go
generated
vendored
Normal file
@@ -0,0 +1,519 @@
|
||||
package httpmock
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil" //nolint: staticcheck
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/jarcoal/httpmock/internal"
|
||||
)
|
||||
|
||||
var ignorePackages = map[string]bool{}
|
||||
|
||||
func init() {
|
||||
IgnoreMatcherHelper()
|
||||
}
|
||||
|
||||
// IgnoreMatcherHelper should be called by external helpers building
|
||||
// [Matcher], typically in an init() function, so that they do not appear in
|
||||
// the autogenerated [Matcher] names.
|
||||
func IgnoreMatcherHelper(skip ...int) {
|
||||
sk := 2
|
||||
if len(skip) > 0 {
|
||||
sk += skip[0]
|
||||
}
|
||||
if pkg := getPackage(sk); pkg != "" {
|
||||
ignorePackages[pkg] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Copied from github.com/maxatome/go-testdeep/internal/trace.getPackage.
|
||||
func getPackage(skip int) string {
|
||||
if pc, _, _, ok := runtime.Caller(skip); ok {
|
||||
if fn := runtime.FuncForPC(pc); fn != nil {
|
||||
return extractPackage(fn.Name())
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// extractPackage extracts package part from a fully qualified function name:
|
||||
//
|
||||
// "foo/bar/test.fn" → "foo/bar/test"
|
||||
// "foo/bar/test.X.fn" → "foo/bar/test"
|
||||
// "foo/bar/test.(*X).fn" → "foo/bar/test"
|
||||
// "foo/bar/test.(*X).fn.func1" → "foo/bar/test"
|
||||
// "weird" → ""
|
||||
//
|
||||
// Derived from github.com/maxatome/go-testdeep/internal/trace.SplitPackageFunc.
|
||||
func extractPackage(fn string) string {
|
||||
sp := strings.LastIndexByte(fn, '/')
|
||||
if sp < 0 {
|
||||
sp = 0 // std package
|
||||
}
|
||||
|
||||
dp := strings.IndexByte(fn[sp:], '.')
|
||||
if dp < 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
return fn[:sp+dp]
|
||||
}
|
||||
|
||||
// calledFrom returns a string like "@PKG.FUNC() FILE:LINE".
|
||||
func calledFrom(skip int) string {
|
||||
pc := make([]uintptr, 128)
|
||||
npc := runtime.Callers(skip+1, pc)
|
||||
pc = pc[:npc]
|
||||
|
||||
frames := runtime.CallersFrames(pc)
|
||||
|
||||
var lastFrame runtime.Frame
|
||||
|
||||
for {
|
||||
frame, more := frames.Next()
|
||||
|
||||
// If testing package is encountered, it is too late
|
||||
if strings.HasPrefix(frame.Function, "testing.") {
|
||||
break
|
||||
}
|
||||
lastFrame = frame
|
||||
// Stop if httpmock is not the caller
|
||||
if !ignorePackages[extractPackage(frame.Function)] || !more {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if lastFrame.Line == 0 {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf(" @%s() %s:%d",
|
||||
lastFrame.Function, lastFrame.File, lastFrame.Line)
|
||||
}
|
||||
|
||||
// MatcherFunc type is the function to use to check a [Matcher]
|
||||
// matches an incoming request. When httpmock calls a function of this
|
||||
// type, it is guaranteed req.Body is never nil. If req.Body is nil in
|
||||
// the original request, it is temporarily replaced by an instance
|
||||
// returning always [io.EOF] for each Read() call, during the call.
|
||||
type MatcherFunc func(req *http.Request) bool
|
||||
|
||||
func matcherFuncOr(mfs []MatcherFunc) MatcherFunc {
|
||||
return func(req *http.Request) bool {
|
||||
for _, mf := range mfs {
|
||||
rearmBody(req)
|
||||
if mf(req) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func matcherFuncAnd(mfs []MatcherFunc) MatcherFunc {
|
||||
if len(mfs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return func(req *http.Request) bool {
|
||||
for _, mf := range mfs {
|
||||
rearmBody(req)
|
||||
if !mf(req) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Check returns true if mf is nil, otherwise it returns mf(req).
|
||||
func (mf MatcherFunc) Check(req *http.Request) bool {
|
||||
return mf == nil || mf(req)
|
||||
}
|
||||
|
||||
// Or combines mf and all mfs in a new [MatcherFunc]. This new
|
||||
// [MatcherFunc] succeeds if one of mf or mfs succeeds. Note that as
|
||||
// a nil [MatcherFunc] is considered succeeding, if mf or one of mfs
|
||||
// items is nil, nil is returned.
|
||||
func (mf MatcherFunc) Or(mfs ...MatcherFunc) MatcherFunc {
|
||||
if len(mfs) == 0 || mf == nil {
|
||||
return mf
|
||||
}
|
||||
cmfs := make([]MatcherFunc, len(mfs)+1)
|
||||
cmfs[0] = mf
|
||||
for i, cur := range mfs {
|
||||
if cur == nil {
|
||||
return nil
|
||||
}
|
||||
cmfs[i+1] = cur
|
||||
}
|
||||
return matcherFuncOr(cmfs)
|
||||
}
|
||||
|
||||
// And combines mf and all mfs in a new [MatcherFunc]. This new
|
||||
// [MatcherFunc] succeeds if all of mf and mfs succeed. Note that a
|
||||
// [MatcherFunc] also succeeds if it is nil, so if mf and all mfs
|
||||
// items are nil, nil is returned.
|
||||
func (mf MatcherFunc) And(mfs ...MatcherFunc) MatcherFunc {
|
||||
if len(mfs) == 0 {
|
||||
return mf
|
||||
}
|
||||
cmfs := make([]MatcherFunc, 0, len(mfs)+1)
|
||||
if mf != nil {
|
||||
cmfs = append(cmfs, mf)
|
||||
}
|
||||
for _, cur := range mfs {
|
||||
if cur != nil {
|
||||
cmfs = append(cmfs, cur)
|
||||
}
|
||||
}
|
||||
return matcherFuncAnd(cmfs)
|
||||
}
|
||||
|
||||
// Matcher type defines a match case. The zero Matcher{} corresponds
|
||||
// to the default case. Otherwise, use [NewMatcher] or any helper
|
||||
// building a [Matcher] like [BodyContainsBytes], [BodyContainsString],
|
||||
// [HeaderExists], [HeaderIs], [HeaderContains] or any of
|
||||
// [github.com/maxatome/tdhttpmock] functions.
|
||||
type Matcher struct {
|
||||
name string
|
||||
fn MatcherFunc // can be nil → means always true
|
||||
}
|
||||
|
||||
var matcherID int64
|
||||
|
||||
// NewMatcher returns a [Matcher]. If name is empty and fn is non-nil,
|
||||
// a name is automatically generated. When fn is nil, it is a default
|
||||
// [Matcher]: its name can be empty.
|
||||
//
|
||||
// Automatically generated names have the form:
|
||||
//
|
||||
// ~HEXANUMBER@PKG.FUNC() FILE:LINE
|
||||
//
|
||||
// Legend:
|
||||
// - HEXANUMBER is a unique 10 digit hexadecimal number, always increasing;
|
||||
// - PKG is the NewMatcher caller package (except if
|
||||
// [IgnoreMatcherHelper] has been previously called, in this case it
|
||||
// is the caller of the caller package and so on);
|
||||
// - FUNC is the function name of the caller in the previous PKG package;
|
||||
// - FILE and LINE are the location of the call in FUNC function.
|
||||
func NewMatcher(name string, fn MatcherFunc) Matcher {
|
||||
if name == "" && fn != nil {
|
||||
// Auto-name the matcher
|
||||
name = fmt.Sprintf("~%010x%s", atomic.AddInt64(&matcherID, 1), calledFrom(1))
|
||||
}
|
||||
return Matcher{
|
||||
name: name,
|
||||
fn: fn,
|
||||
}
|
||||
}
|
||||
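// Illustrative sketch (not part of the vendored file): an explicitly named
// matcher paired with RegisterMatcherResponder; the name is what appears in
// call-count info and is used to replace or remove the matcher later.
//
//	isJSON := httpmock.NewMatcher("is-json", func(req *http.Request) bool {
//		return strings.Contains(req.Header.Get("Content-Type"), "application/json")
//	})
//	httpmock.RegisterMatcherResponder("POST", "https://api.mybiz.com/articles",
//		isJSON, httpmock.NewStringResponder(201, `{"status":"created"}`))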
|
||||
// BodyContainsBytes returns a [Matcher] checking that request body
|
||||
// contains subslice.
|
||||
//
|
||||
// The name of the returned [Matcher] is auto-generated (see [NewMatcher]).
|
||||
// To name it explicitly, use [Matcher.WithName] as in:
|
||||
//
|
||||
// BodyContainsBytes([]byte("foo")).WithName("10-body-contains-foo")
|
||||
//
|
||||
// See also [github.com/maxatome/tdhttpmock.Body],
|
||||
// [github.com/maxatome/tdhttpmock.JSONBody] and
|
||||
// [github.com/maxatome/tdhttpmock.XMLBody] for powerful body testing.
|
||||
func BodyContainsBytes(subslice []byte) Matcher {
|
||||
return NewMatcher("",
|
||||
func(req *http.Request) bool {
|
||||
rearmBody(req)
|
||||
b, err := ioutil.ReadAll(req.Body)
|
||||
return err == nil && bytes.Contains(b, subslice)
|
||||
})
|
||||
}
|
||||
|
||||
// BodyContainsString returns a [Matcher] checking that request body
|
||||
// contains substr.
|
||||
//
|
||||
// The name of the returned [Matcher] is auto-generated (see [NewMatcher]).
|
||||
// To name it explicitly, use [Matcher.WithName] as in:
|
||||
//
|
||||
// BodyContainsString("foo").WithName("10-body-contains-foo")
|
||||
//
|
||||
// See also [github.com/maxatome/tdhttpmock.Body],
|
||||
// [github.com/maxatome/tdhttpmock.JSONBody] and
|
||||
// [github.com/maxatome/tdhttpmock.XMLBody] for powerful body testing.
|
||||
func BodyContainsString(substr string) Matcher {
|
||||
return NewMatcher("",
|
||||
func(req *http.Request) bool {
|
||||
rearmBody(req)
|
||||
b, err := ioutil.ReadAll(req.Body)
|
||||
return err == nil && bytes.Contains(b, []byte(substr))
|
||||
})
|
||||
}
|
||||
|
||||
// HeaderExists returns a [Matcher] checking that request contains
|
||||
// key header.
|
||||
//
|
||||
// The name of the returned [Matcher] is auto-generated (see [NewMatcher]).
|
||||
// To name it explicitly, use [Matcher.WithName] as in:
|
||||
//
|
||||
// HeaderExists("X-Custom").WithName("10-custom-exists")
|
||||
//
|
||||
// See also [github.com/maxatome/tdhttpmock.Header] for powerful
|
||||
// header testing.
|
||||
func HeaderExists(key string) Matcher {
|
||||
return NewMatcher("",
|
||||
func(req *http.Request) bool {
|
||||
_, ok := req.Header[key]
|
||||
return ok
|
||||
})
|
||||
}
|
||||
|
||||
// HeaderIs returns a [Matcher] checking that request contains
|
||||
// key header set to value.
|
||||
//
|
||||
// The name of the returned [Matcher] is auto-generated (see [NewMatcher]).
|
||||
// To name it explicitly, use [Matcher.WithName] as in:
|
||||
//
|
||||
// HeaderIs("X-Custom", "VALUE").WithName("10-custom-is-value")
|
||||
//
|
||||
// See also [github.com/maxatome/tdhttpmock.Header] for powerful
|
||||
// header testing.
|
||||
func HeaderIs(key, value string) Matcher {
|
||||
return NewMatcher("",
|
||||
func(req *http.Request) bool {
|
||||
return req.Header.Get(key) == value
|
||||
})
|
||||
}
|
||||
|
||||
// HeaderContains returns a [Matcher] checking that request contains key
|
||||
// header itself containing substr.
|
||||
//
|
||||
// The name of the returned [Matcher] is auto-generated (see [NewMatcher]).
|
||||
// To name it explicitly, use [Matcher.WithName] as in:
|
||||
//
|
||||
// HeaderContains("X-Custom", "VALUE").WithName("10-custom-contains-value")
|
||||
//
|
||||
// See also [github.com/maxatome/tdhttpmock.Header] for powerful
|
||||
// header testing.
|
||||
func HeaderContains(key, substr string) Matcher {
|
||||
return NewMatcher("",
|
||||
func(req *http.Request) bool {
|
||||
return strings.Contains(req.Header.Get(key), substr)
|
||||
})
|
||||
}
|
||||
|
||||
// Name returns the m's name.
|
||||
func (m Matcher) Name() string {
|
||||
return m.name
|
||||
}
|
||||
|
||||
// WithName returns a new [Matcher] based on m with name name.
|
||||
func (m Matcher) WithName(name string) Matcher {
|
||||
return NewMatcher(name, m.fn)
|
||||
}
|
||||
|
||||
// Check returns true if req is matched by m.
|
||||
func (m Matcher) Check(req *http.Request) bool {
|
||||
return m.fn.Check(req)
|
||||
}
|
||||
|
||||
// Or combines m and all ms in a new [Matcher]. This new [Matcher]
|
||||
// succeeds if one of m or ms succeeds. Note that as a [Matcher]
|
||||
// succeeds if internal fn is nil, if m's internal fn or any of ms
|
||||
// item's internal fn is nil, the returned [Matcher] always
|
||||
// succeeds. The name of returned [Matcher] is m's one.
|
||||
func (m Matcher) Or(ms ...Matcher) Matcher {
|
||||
if len(ms) == 0 || m.fn == nil {
|
||||
return m
|
||||
}
|
||||
mfs := make([]MatcherFunc, 1, len(ms)+1)
|
||||
mfs[0] = m.fn
|
||||
for _, cur := range ms {
|
||||
if cur.fn == nil {
|
||||
return Matcher{}
|
||||
}
|
||||
mfs = append(mfs, cur.fn)
|
||||
}
|
||||
m.fn = matcherFuncOr(mfs)
|
||||
return m
|
||||
}
|
||||
|
||||
// And combines m and all ms in a new [Matcher]. This new [Matcher]
|
||||
// succeeds if all of m and ms succeed. Note that a [Matcher] also
|
||||
// succeeds if [Matcher] [MatcherFunc] is nil. The name of returned
|
||||
// [Matcher] is m's one if the empty/default [Matcher] is returned.
|
||||
func (m Matcher) And(ms ...Matcher) Matcher {
|
||||
if len(ms) == 0 {
|
||||
return m
|
||||
}
|
||||
mfs := make([]MatcherFunc, 0, len(ms)+1)
|
||||
if m.fn != nil {
|
||||
mfs = append(mfs, m.fn)
|
||||
}
|
||||
for _, cur := range ms {
|
||||
if cur.fn != nil {
|
||||
mfs = append(mfs, cur.fn)
|
||||
}
|
||||
}
|
||||
m.fn = matcherFuncAnd(mfs)
|
||||
if m.fn != nil {
|
||||
return m
|
||||
}
|
||||
return Matcher{}
|
||||
}
|
||||
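// Illustrative sketch (not part of the vendored file): Or and And compose the
// existing helpers, so a single responder can be restricted to requests that
// carry a JSON content type *and* mention a given field:
//
//	m := httpmock.HeaderContains("Content-Type", "json").
//		And(httpmock.BodyContainsString(`"type":"toy"`))
//	httpmock.RegisterMatcherResponder("POST", "https://api.mybiz.com/articles",
//		m, httpmock.NewStringResponder(400, `{"reason":"Invalid article type"}`))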
|
||||
type matchResponder struct {
|
||||
matcher Matcher
|
||||
responder Responder
|
||||
}
|
||||
|
||||
type matchResponders []matchResponder
|
||||
|
||||
// add adds or replaces a matchResponder.
|
||||
func (mrs matchResponders) add(mr matchResponder) matchResponders {
|
||||
// default is always at end
|
||||
if mr.matcher.fn == nil {
|
||||
if len(mrs) > 0 && (mrs)[len(mrs)-1].matcher.fn == nil {
|
||||
mrs[len(mrs)-1] = mr
|
||||
return mrs
|
||||
}
|
||||
return append(mrs, mr)
|
||||
}
|
||||
|
||||
for i, cur := range mrs {
|
||||
if cur.matcher.name == mr.matcher.name {
|
||||
mrs[i] = mr
|
||||
return mrs
|
||||
}
|
||||
}
|
||||
|
||||
for i, cur := range mrs {
|
||||
if cur.matcher.fn == nil || cur.matcher.name > mr.matcher.name {
|
||||
mrs = append(mrs, matchResponder{})
|
||||
copy(mrs[i+1:], mrs[i:len(mrs)-1])
|
||||
mrs[i] = mr
|
||||
return mrs
|
||||
}
|
||||
}
|
||||
return append(mrs, mr)
|
||||
}
|
||||
|
||||
func (mrs matchResponders) checkEmptiness() matchResponders {
|
||||
if len(mrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return mrs
|
||||
}
|
||||
|
||||
func (mrs matchResponders) shrink() matchResponders {
|
||||
mrs[len(mrs)-1] = matchResponder{}
|
||||
mrs = mrs[:len(mrs)-1]
|
||||
return mrs.checkEmptiness()
|
||||
}
|
||||
|
||||
func (mrs matchResponders) remove(name string) matchResponders {
|
||||
// Special case, even if default has been renamed, we consider ""
|
||||
// matching this default
|
||||
if name == "" {
|
||||
// default is always at end
|
||||
if len(mrs) > 0 && mrs[len(mrs)-1].matcher.fn == nil {
|
||||
return mrs.shrink()
|
||||
}
|
||||
return mrs.checkEmptiness()
|
||||
}
|
||||
|
||||
for i, cur := range mrs {
|
||||
if cur.matcher.name == name {
|
||||
copy(mrs[i:], mrs[i+1:])
|
||||
return mrs.shrink()
|
||||
}
|
||||
}
|
||||
return mrs.checkEmptiness()
|
||||
}
|
||||
|
||||
func (mrs matchResponders) findMatchResponder(req *http.Request) *matchResponder {
|
||||
if len(mrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
if mrs[0].matcher.fn == nil { // nil match is always the last
|
||||
return &mrs[0]
|
||||
}
|
||||
|
||||
copyBody := &bodyCopyOnRead{body: req.Body}
|
||||
req.Body = copyBody
|
||||
defer func() {
|
||||
copyBody.rearm()
|
||||
req.Body = copyBody.body
|
||||
}()
|
||||
|
||||
for _, mr := range mrs {
|
||||
copyBody.rearm()
|
||||
if mr.matcher.Check(req) {
|
||||
return &mr
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type matchRouteKey struct {
|
||||
internal.RouteKey
|
||||
name string
|
||||
}
|
||||
|
||||
func (m matchRouteKey) String() string {
|
||||
if m.name == "" {
|
||||
return m.RouteKey.String()
|
||||
}
|
||||
return m.RouteKey.String() + " <" + m.name + ">"
|
||||
}
|
||||
|
||||
func rearmBody(req *http.Request) {
|
||||
if req != nil {
|
||||
if body, ok := req.Body.(interface{ rearm() }); ok {
|
||||
body.rearm()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type buffer struct {
|
||||
*bytes.Reader
|
||||
}
|
||||
|
||||
func (b buffer) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// bodyCopyOnRead mutates body into a buffer on first Read(), except
|
||||
// if body is nil or http.NoBody. In this case, EOF is returned for
|
||||
// each Read() and body stays untouched.
|
||||
type bodyCopyOnRead struct {
|
||||
body io.ReadCloser
|
||||
}
|
||||
|
||||
func (b *bodyCopyOnRead) rearm() {
|
||||
if buf, ok := b.body.(buffer); ok {
|
||||
buf.Seek(0, io.SeekStart) //nolint:errcheck
|
||||
} // else b.body contains the original body, so don't touch
|
||||
}
|
||||
|
||||
func (b *bodyCopyOnRead) copy() {
|
||||
if _, ok := b.body.(buffer); !ok && b.body != nil && b.body != http.NoBody {
|
||||
buf, _ := ioutil.ReadAll(b.body)
|
||||
b.body.Close()
|
||||
b.body = buffer{bytes.NewReader(buf)}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bodyCopyOnRead) Read(p []byte) (n int, err error) {
|
||||
b.copy()
|
||||
if b.body == nil {
|
||||
return 0, io.EOF
|
||||
}
|
||||
return b.body.Read(p)
|
||||
}
|
||||
|
||||
func (b *bodyCopyOnRead) Close() error {
|
||||
return nil
|
||||
}
|
||||
756
vendor/github.com/jarcoal/httpmock/response.go
generated
vendored
Normal file
@@ -0,0 +1,756 @@
|
||||
package httpmock
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jarcoal/httpmock/internal"
|
||||
)
|
||||
|
||||
// fromThenKeyType is used by Then().
|
||||
type fromThenKeyType struct{}
|
||||
|
||||
var fromThenKey = fromThenKeyType{}
|
||||
|
||||
type suggestedInfo struct {
|
||||
kind string
|
||||
suggested string
|
||||
}
|
||||
|
||||
// suggestedMethodKeyType is used by NewNotFoundResponder().
|
||||
type suggestedKeyType struct{}
|
||||
|
||||
var suggestedKey = suggestedKeyType{}
|
||||
|
||||
// Responder is a callback that receives an [*http.Request] and returns
|
||||
// a mocked response.
|
||||
type Responder func(*http.Request) (*http.Response, error)
|
||||
|
||||
func (r Responder) times(name string, n int, fn ...func(...any)) Responder {
|
||||
count := 0
|
||||
return func(req *http.Request) (*http.Response, error) {
|
||||
count++
|
||||
if count > n {
|
||||
err := internal.StackTracer{
|
||||
Err: fmt.Errorf("Responder not found for %s %s (coz %s and already called %d times)", req.Method, req.URL, name, count),
|
||||
}
|
||||
if len(fn) > 0 {
|
||||
err.CustomFn = fn[0]
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return r(req)
|
||||
}
|
||||
}
|
||||
|
||||
// Times returns a [Responder] callable n times before returning an
|
||||
// error. If the [Responder] is called more than n times and fn is
|
||||
// passed and non-nil, it acts as the fn parameter of
|
||||
// [NewNotFoundResponder], allowing to dump the stack trace to
|
||||
// localize the origin of the call.
|
||||
//
|
||||
// import (
|
||||
// "testing"
|
||||
// "github.com/jarcoal/httpmock"
|
||||
// )
|
||||
// ...
|
||||
// func TestMyApp(t *testing.T) {
|
||||
// ...
|
||||
// // This responder is callable 3 times, then an error is returned and
|
||||
// // the stacktrace of the call logged using t.Log()
|
||||
// httpmock.RegisterResponder("GET", "/foo/bar",
|
||||
// httpmock.NewStringResponder(200, "{}").Times(3, t.Log),
|
||||
// )
|
||||
func (r Responder) Times(n int, fn ...func(...any)) Responder {
|
||||
return r.times("Times", n, fn...)
|
||||
}
|
||||
|
||||
// Once returns a new [Responder] callable once before returning an
|
||||
// error. If the [Responder] is called 2 or more times and fn is passed
|
||||
// and non-nil, it acts as the fn parameter of [NewNotFoundResponder],
|
||||
// allowing to dump the stack trace to localize the origin of the
|
||||
// call.
|
||||
//
|
||||
// import (
|
||||
// "testing"
|
||||
// "github.com/jarcoal/httpmock"
|
||||
// )
|
||||
// ...
|
||||
// func TestMyApp(t *testing.T) {
|
||||
// ...
|
||||
// // This responder is callable only once, then an error is returned and
|
||||
// // the stacktrace of the call logged using t.Log()
|
||||
// httpmock.RegisterResponder("GET", "/foo/bar",
|
||||
// httpmock.NewStringResponder(200, "{}").Once(t.Log),
|
||||
// )
|
||||
func (r Responder) Once(fn ...func(...any)) Responder {
|
||||
return r.times("Once", 1, fn...)
|
||||
}
|
||||
|
||||
// Trace returns a new [Responder] that allows to easily trace the calls
|
||||
// of the original [Responder] using fn. It can be used in conjunction
|
||||
// with the testing package as in the example below with the help of
|
||||
// [*testing.T.Log] method:
|
||||
//
|
||||
// import (
|
||||
// "testing"
|
||||
// "github.com/jarcoal/httpmock"
|
||||
// )
|
||||
// ...
|
||||
// func TestMyApp(t *testing.T) {
|
||||
// ...
|
||||
// httpmock.RegisterResponder("GET", "/foo/bar",
|
||||
// httpmock.NewStringResponder(200, "{}").Trace(t.Log),
|
||||
// )
|
||||
func (r Responder) Trace(fn func(...any)) Responder {
|
||||
return func(req *http.Request) (*http.Response, error) {
|
||||
resp, err := r(req)
|
||||
return resp, internal.StackTracer{
|
||||
CustomFn: fn,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Delay returns a new [Responder] that calls the original r Responder
|
||||
// after a delay of d.
|
||||
//
|
||||
// import (
|
||||
// "testing"
|
||||
// "time"
|
||||
// "github.com/jarcoal/httpmock"
|
||||
// )
|
||||
// ...
|
||||
// func TestMyApp(t *testing.T) {
|
||||
// ...
|
||||
// httpmock.RegisterResponder("GET", "/foo/bar",
|
||||
// httpmock.NewStringResponder(200, "{}").Delay(100*time.Millisecond),
|
||||
// )
|
||||
func (r Responder) Delay(d time.Duration) Responder {
|
||||
return func(req *http.Request) (*http.Response, error) {
|
||||
time.Sleep(d)
|
||||
return r(req)
|
||||
}
|
||||
}
|
||||
|
||||
var errThenDone = errors.New("ThenDone")
|
||||
|
||||
// similar is simple but a bit tricky. Here we consider two Responder
|
||||
// are similar if they share the same function, but not necessarily
|
||||
// the same environment. It is only used by Then below.
|
||||
func (r Responder) similar(other Responder) bool {
|
||||
return reflect.ValueOf(r).Pointer() == reflect.ValueOf(other).Pointer()
|
||||
}
|
||||
|
||||
// Then returns a new [Responder] that calls r on first invocation, then
|
||||
// next on following ones, except when Then is chained, in this case
|
||||
// next is called only once:
|
||||
//
|
||||
// A := httpmock.NewStringResponder(200, "A")
|
||||
// B := httpmock.NewStringResponder(200, "B")
|
||||
// C := httpmock.NewStringResponder(200, "C")
|
||||
//
|
||||
// httpmock.RegisterResponder("GET", "/pipo", A.Then(B).Then(C))
|
||||
//
|
||||
// http.Get("http://foo.bar/pipo") // A is called
|
||||
// http.Get("http://foo.bar/pipo") // B is called
|
||||
// http.Get("http://foo.bar/pipo") // C is called
|
||||
// http.Get("http://foo.bar/pipo") // C is called, and so on
|
||||
//
|
||||
// A panic occurs if next is the result of another Then call (because
|
||||
// allowing it could cause inextricable problems at runtime). Then
|
||||
// calls can be chained, but cannot call each other by
|
||||
// parameter. Example:
|
||||
//
|
||||
// A.Then(B).Then(C) // is OK
|
||||
// A.Then(B.Then(C)) // panics as A.Then() parameter is another Then() call
|
||||
//
|
||||
// See also [ResponderFromMultipleResponses].
|
||||
func (r Responder) Then(next Responder) (x Responder) {
|
||||
var done int
|
||||
var mu sync.Mutex
|
||||
x = func(req *http.Request) (*http.Response, error) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
ctx := req.Context()
|
||||
thenCalledUs, _ := ctx.Value(fromThenKey).(bool)
|
||||
if !thenCalledUs {
|
||||
req = req.WithContext(context.WithValue(ctx, fromThenKey, true))
|
||||
}
|
||||
|
||||
switch done {
|
||||
case 0:
|
||||
resp, err := r(req)
|
||||
if err != errThenDone {
|
||||
if !x.similar(r) { // r is NOT a Then
|
||||
done = 1
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case 1:
|
||||
done = 2 // next is NEVER a Then, as it is forbidden by design
|
||||
return next(req)
|
||||
}
|
||||
if thenCalledUs {
|
||||
return nil, errThenDone
|
||||
}
|
||||
return next(req)
|
||||
}
|
||||
|
||||
if next.similar(x) {
|
||||
panic("Then() does not accept another Then() Responder as parameter")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetContentLength returns a new [Responder] based on r that ensures
|
||||
// the returned [*http.Response] ContentLength field and
|
||||
// Content-Length header are set to the right value.
|
||||
//
|
||||
// If r returns an [*http.Response] with a nil Body or equal to
|
||||
// [http.NoBody], the length is always set to 0.
|
||||
//
|
||||
// If r returned response.Body implements:
|
||||
//
|
||||
// Len() int
|
||||
//
|
||||
// then the length is set to the Body.Len() returned value. All
|
||||
// httpmock generated bodies implement this method. Beware that
|
||||
// [strings.Builder], [strings.Reader], [bytes.Buffer] and
|
||||
// [bytes.Reader] types used with [io.NopCloser] do not implement
|
||||
// Len() anymore.
|
||||
//
|
||||
// Otherwise, r returned response.Body is entirely copied into an
|
||||
// internal buffer to get its length, then it is closed. The Body of
|
||||
// the [*http.Response] returned by the [Responder] returned by
|
||||
// SetContentLength can then be read again to return its content as
|
||||
// usual. But keep in mind that each time this [Responder] is called,
|
||||
// r is called first. So this one has to carefully handle its body: it
|
||||
// is highly recommended to use [NewRespBodyFromString] or
|
||||
// [NewRespBodyFromBytes] to set the body once (as
|
||||
// [NewStringResponder] and [NewBytesResponder] do behind the scene),
|
||||
// or to build the body each time r is called.
|
||||
//
|
||||
// The following calls are all correct:
|
||||
//
|
||||
// responder = httpmock.NewStringResponder(200, "BODY").SetContentLength()
|
||||
// responder = httpmock.NewBytesResponder(200, []byte("BODY")).SetContentLength()
|
||||
// responder = ResponderFromResponse(&http.Response{
|
||||
// // build a body once, but httpmock knows how to "rearm" it once read
|
||||
// Body: NewRespBodyFromString("BODY"),
|
||||
// StatusCode: 200,
|
||||
// }).SetContentLength()
|
||||
// responder = httpmock.Responder(func(req *http.Request) (*http.Response, error) {
|
||||
// // build a new body for each call
|
||||
// return &http.Response{
|
||||
// StatusCode: 200,
|
||||
// Body: io.NopCloser(strings.NewReader("BODY")),
|
||||
// }, nil
|
||||
// }).SetContentLength()
|
||||
//
|
||||
// But the following is not correct:
|
||||
//
|
||||
// responder = httpmock.ResponderFromResponse(&http.Response{
|
||||
// StatusCode: 200,
|
||||
// Body: io.NopCloser(strings.NewReader("BODY")),
|
||||
// }).SetContentLength()
|
||||
//
|
||||
// it will only succeed for the first responder call. The following
|
||||
// calls will deliver responses with an empty body, as it will already
|
||||
// have been read by the first call.
|
||||
func (r Responder) SetContentLength() Responder {
|
||||
return func(req *http.Request) (*http.Response, error) {
|
||||
resp, err := r(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nr := *resp
|
||||
switch nr.Body {
|
||||
case nil:
|
||||
nr.Body = http.NoBody
|
||||
fallthrough
|
||||
case http.NoBody:
|
||||
nr.ContentLength = 0
|
||||
default:
|
||||
bl, ok := nr.Body.(interface{ Len() int })
|
||||
if !ok {
|
||||
copyBody := &dummyReadCloser{orig: nr.Body}
|
||||
bl, nr.Body = copyBody, copyBody
|
||||
}
|
||||
nr.ContentLength = int64(bl.Len())
|
||||
}
|
||||
if nr.Header == nil {
|
||||
nr.Header = http.Header{}
|
||||
}
|
||||
nr.Header = nr.Header.Clone()
|
||||
nr.Header.Set("Content-Length", strconv.FormatInt(nr.ContentLength, 10))
|
||||
return &nr, nil
|
||||
}
|
||||
}
|
||||
|
||||
// HeaderAdd returns a new [Responder] based on r that ensures the
|
||||
// returned [*http.Response] includes h header. It adds each h entry
|
||||
// to the header. It appends to any existing values associated with
|
||||
// each h key. Each key is case insensitive; it is canonicalized by
|
||||
// [http.CanonicalHeaderKey].
|
||||
//
|
||||
// See also [Responder.HeaderSet] and [Responder.SetContentLength].
|
||||
func (r Responder) HeaderAdd(h http.Header) Responder {
|
||||
return func(req *http.Request) (*http.Response, error) {
|
||||
resp, err := r(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nr := *resp
|
||||
if nr.Header == nil {
|
||||
nr.Header = make(http.Header, len(h))
|
||||
}
|
||||
nr.Header = nr.Header.Clone()
|
||||
for k, v := range h {
|
||||
k = http.CanonicalHeaderKey(k)
|
||||
if v == nil {
|
||||
if _, ok := nr.Header[k]; !ok {
|
||||
nr.Header[k] = nil
|
||||
}
|
||||
continue
|
||||
}
|
||||
nr.Header[k] = append(nr.Header[k], v...)
|
||||
}
|
||||
return &nr, nil
|
||||
}
|
||||
}
|
||||
|
||||
// HeaderSet returns a new [Responder] based on r that ensures the
|
||||
// returned [*http.Response] includes h header. It sets the header
|
||||
// entries associated with each h key. It replaces any existing values
|
||||
// associated each h key. Each key is case insensitive; it is
|
||||
// canonicalized by [http.CanonicalHeaderKey].
|
||||
//
|
||||
// See also [Responder.HeaderAdd] and [Responder.SetContentLength].
|
||||
func (r Responder) HeaderSet(h http.Header) Responder {
|
||||
return func(req *http.Request) (*http.Response, error) {
|
||||
resp, err := r(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nr := *resp
|
||||
if nr.Header == nil {
|
||||
nr.Header = make(http.Header, len(h))
|
||||
}
|
||||
nr.Header = nr.Header.Clone()
|
||||
for k, v := range h {
|
||||
k = http.CanonicalHeaderKey(k)
|
||||
if v == nil {
|
||||
nr.Header[k] = nil
|
||||
continue
|
||||
}
|
||||
nr.Header[k] = append([]string(nil), v...)
|
||||
}
|
||||
return &nr, nil
|
||||
}
|
||||
}
|
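// Example (illustrative sketch; pngBytes is any []byte chosen by the caller):
// forcing a Content-Type on a responder, replacing any value already present.
//
//    responder := httpmock.NewBytesResponder(200, pngBytes).
//        HeaderSet(http.Header{"Content-Type": {"image/png"}})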
||||
|
||||
// ResponderFromResponse wraps an [*http.Response] in a [Responder].
|
||||
//
|
||||
// Be careful, except for responses generated by httpmock
|
||||
// ([NewStringResponse] and [NewBytesResponse] functions) for which
|
||||
// there is no problem, it is the caller's responsibility to ensure the
|
||||
// response body can be read several times and concurrently if needed,
|
||||
// as it is shared among all [Responder] returned responses.
|
||||
//
|
||||
// For home-made responses, [NewRespBodyFromString] and
|
||||
// [NewRespBodyFromBytes] functions can be used to produce response
|
||||
// bodies that can be read several times and concurrently.
|
||||
func ResponderFromResponse(resp *http.Response) Responder {
|
||||
return func(req *http.Request) (*http.Response, error) {
|
||||
res := *resp
|
||||
|
||||
// Our stuff: generate a new io.ReadCloser instance sharing the same buffer
|
||||
if body, ok := resp.Body.(*dummyReadCloser); ok {
|
||||
res.Body = body.copy()
|
||||
}
|
||||
|
||||
res.Request = req
|
||||
return &res, nil
|
||||
}
|
||||
}
|
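// Example (illustrative sketch; status code and body are made up): a
// home-made response whose body can safely be served several times, built
// with [NewRespBodyFromString] as recommended above.
//
//    responder := httpmock.ResponderFromResponse(&http.Response{
//        StatusCode: 201,
//        Body:       httpmock.NewRespBodyFromString(`{"id":42}`),
//        Header:     http.Header{},
//    })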
||||
|
||||
// ResponderFromMultipleResponses wraps an [*http.Response] list in a
|
||||
// [Responder].
|
||||
//
|
||||
// Each response will be returned in the order of the provided list.
|
||||
// If the [Responder] is called more times than the size of the provided
|
||||
// list, an error is returned.
|
||||
//
|
||||
// Be careful, except for responses generated by httpmock
|
||||
// ([NewStringResponse] and [NewBytesResponse] functions) for which
|
||||
// there is no problem, it is the caller's responsibility to ensure the
|
||||
// response body can be read several times and concurrently if needed,
|
||||
// as it is shared among all [Responder] returned responses.
|
||||
//
|
||||
// For home-made responses, [NewRespBodyFromString] and
|
||||
// [NewRespBodyFromBytes] functions can be used to produce response
|
||||
// bodies that can be read several times and concurrently.
|
||||
//
|
||||
// If all responses have been returned and fn is passed and non-nil,
|
||||
// it acts as the fn parameter of [NewNotFoundResponder], allowing you to
|
||||
// dump the stack trace to localize the origin of the call.
|
||||
//
|
||||
// import (
|
||||
// "github.com/jarcoal/httpmock"
|
||||
// "testing"
|
||||
// )
|
||||
// ...
|
||||
// func TestMyApp(t *testing.T) {
|
||||
// ...
|
||||
// // This responder is callable only once, then an error is returned and
|
||||
// // the stacktrace of the call logged using t.Log()
|
||||
// httpmock.RegisterResponder("GET", "/foo/bar",
|
||||
// httpmock.ResponderFromMultipleResponses(
|
||||
// []*http.Response{
|
||||
// httpmock.NewStringResponse(200, `{"name":"bar"}`),
|
||||
// httpmock.NewStringResponse(404, `{"mesg":"Not found"}`),
|
||||
// },
|
||||
// t.Log),
|
||||
// )
|
||||
// }
|
||||
//
|
||||
// See also [Responder.Then].
|
||||
func ResponderFromMultipleResponses(responses []*http.Response, fn ...func(...any)) Responder {
|
||||
responseIndex := 0
|
||||
mutex := sync.Mutex{}
|
||||
return func(req *http.Request) (*http.Response, error) {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
defer func() { responseIndex++ }()
|
||||
if responseIndex >= len(responses) {
|
||||
err := internal.StackTracer{
|
||||
Err: fmt.Errorf("not enough responses provided: responder called %d time(s) but %d response(s) provided", responseIndex+1, len(responses)),
|
||||
}
|
||||
if len(fn) > 0 {
|
||||
err.CustomFn = fn[0]
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
res := *responses[responseIndex]
|
||||
// Our stuff: generate a new io.ReadCloser instance sharing the same buffer
|
||||
if body, ok := responses[responseIndex].Body.(*dummyReadCloser); ok {
|
||||
res.Body = body.copy()
|
||||
}
|
||||
|
||||
res.Request = req
|
||||
return &res, nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewErrorResponder creates a [Responder] that returns a nil response and the
|
||||
// given error. This can be used, e.g., to imitate lower-level HTTP errors for the
|
||||
// client.
|
||||
func NewErrorResponder(err error) Responder {
|
||||
return func(req *http.Request) (*http.Response, error) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
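// Example (illustrative sketch; the URL and error message are made up):
// simulating a transport-level failure so the client error path can be tested.
//
//    httpmock.RegisterResponder("GET", "https://example.com/flaky",
//        httpmock.NewErrorResponder(errors.New("connection reset by peer")))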
||||
|
||||
// NewNotFoundResponder creates a [Responder] typically used in
|
||||
// conjunction with [RegisterNoResponder] function and [testing]
|
||||
// package, to be proactive when a [Responder] is not found. fn is
|
||||
// called with a unique string parameter containing the name of the
|
||||
// missing route and the stack trace to localize the origin of the
|
||||
// call. If fn returns (= if it does not panic), the [Responder] returns
|
||||
// an error of the form: "Responder not found for GET http://foo.bar/path".
|
||||
// Note that fn can be nil.
|
||||
//
|
||||
// It is useful when writing tests to ensure that all routes have been
|
||||
// mocked.
|
||||
//
|
||||
// Example of use:
|
||||
//
|
||||
// import (
|
||||
// "testing"
|
||||
// "github.com/jarcoal/httpmock"
|
||||
// )
|
||||
// ...
|
||||
// func TestMyApp(t *testing.T) {
|
||||
// ...
|
||||
// // Calls testing.Fatal with the name of Responder-less route and
|
||||
// // the stack trace of the call.
|
||||
// httpmock.RegisterNoResponder(httpmock.NewNotFoundResponder(t.Fatal))
|
||||
//
|
||||
// Will abort the current test and print something like:
|
||||
//
|
||||
// transport_test.go:735: Called from net/http.Get()
|
||||
// at /go/src/github.com/jarcoal/httpmock/transport_test.go:714
|
||||
// github.com/jarcoal/httpmock.TestCheckStackTracer()
|
||||
// at /go/src/testing/testing.go:865
|
||||
// testing.tRunner()
|
||||
// at /go/src/runtime/asm_amd64.s:1337
|
||||
func NewNotFoundResponder(fn func(...any)) Responder {
|
||||
return func(req *http.Request) (*http.Response, error) {
|
||||
var extra string
|
||||
suggested, _ := req.Context().Value(suggestedKey).(*suggestedInfo)
|
||||
if suggested != nil {
|
||||
if suggested.kind == "matcher" {
|
||||
extra = fmt.Sprintf(` despite %s`, suggested.suggested)
|
||||
} else {
|
||||
extra = fmt.Sprintf(`, but one matches %s %q`, suggested.kind, suggested.suggested)
|
||||
}
|
||||
}
|
||||
return nil, internal.StackTracer{
|
||||
CustomFn: fn,
|
||||
Err: fmt.Errorf("Responder not found for %s %s%s", req.Method, req.URL, extra),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewStringResponse creates an [*http.Response] with a body based on
|
||||
// the given string. Also accepts an HTTP status code.
|
||||
//
|
||||
// To pass the content of an existing file as body use [File] as in:
|
||||
//
|
||||
// httpmock.NewStringResponse(200, httpmock.File("body.txt").String())
|
||||
func NewStringResponse(status int, body string) *http.Response {
|
||||
return &http.Response{
|
||||
Status: strconv.Itoa(status),
|
||||
StatusCode: status,
|
||||
Body: NewRespBodyFromString(body),
|
||||
Header: http.Header{},
|
||||
ContentLength: -1,
|
||||
}
|
||||
}
|
||||
|
||||
// NewStringResponder creates a [Responder] from a given body (as a
|
||||
// string) and status code.
|
||||
//
|
||||
// To pass the content of an existing file as body use [File] as in:
|
||||
//
|
||||
// httpmock.NewStringResponder(200, httpmock.File("body.txt").String())
|
||||
func NewStringResponder(status int, body string) Responder {
|
||||
return ResponderFromResponse(NewStringResponse(status, body))
|
||||
}
|
||||
|
||||
// NewBytesResponse creates an [*http.Response] with a body based on the
|
||||
// given bytes. Also accepts an HTTP status code.
|
||||
//
|
||||
// To pass the content of an existing file as body use [File] as in:
|
||||
//
|
||||
// httpmock.NewBytesResponse(200, httpmock.File("body.raw").Bytes())
|
||||
func NewBytesResponse(status int, body []byte) *http.Response {
|
||||
return &http.Response{
|
||||
Status: strconv.Itoa(status),
|
||||
StatusCode: status,
|
||||
Body: NewRespBodyFromBytes(body),
|
||||
Header: http.Header{},
|
||||
ContentLength: -1,
|
||||
}
|
||||
}
|
||||
|
||||
// NewBytesResponder creates a [Responder] from a given body (as a byte
|
||||
// slice) and status code.
|
||||
//
|
||||
// To pass the content of an existing file as body use [File] as in:
|
||||
//
|
||||
// httpmock.NewBytesResponder(200, httpmock.File("body.raw").Bytes())
|
||||
func NewBytesResponder(status int, body []byte) Responder {
|
||||
return ResponderFromResponse(NewBytesResponse(status, body))
|
||||
}
|
||||
|
||||
// NewJsonResponse creates an [*http.Response] with a body that is a
|
||||
// JSON encoded representation of the given any. Also accepts
|
||||
// an HTTP status code.
|
||||
//
|
||||
// To pass the content of an existing file as body use [File] as in:
|
||||
//
|
||||
// httpmock.NewJsonResponse(200, httpmock.File("body.json"))
|
||||
func NewJsonResponse(status int, body any) (*http.Response, error) { // nolint: revive
|
||||
encoded, err := json.Marshal(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response := NewBytesResponse(status, encoded)
|
||||
response.Header.Set("Content-Type", "application/json")
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// NewJsonResponder creates a [Responder] from a given body (as an
|
||||
// any that is encoded to JSON) and status code.
|
||||
//
|
||||
// To pass the content of an existing file as body use [File] as in:
|
||||
//
|
||||
// httpmock.NewJsonResponder(200, httpmock.File("body.json"))
|
||||
func NewJsonResponder(status int, body any) (Responder, error) { // nolint: revive
|
||||
resp, err := NewJsonResponse(status, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ResponderFromResponse(resp), nil
|
||||
}
|
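// Example (illustrative sketch; the route and payload are made up): serving a
// Go value as a JSON body; [NewJsonResponse] also sets the Content-Type
// header to application/json.
//
//    responder, err := httpmock.NewJsonResponder(200, map[string]any{"name": "bar"})
//    if err != nil {
//        panic(err) // or report it through your test framework
//    }
//    httpmock.RegisterResponder("GET", "/foo/bar", responder)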
||||
|
||||
// NewJsonResponderOrPanic is like [NewJsonResponder] but panics in
|
||||
// case of error.
|
||||
//
|
||||
// It simplifies the call of [RegisterResponder], avoiding the use of a
|
||||
// temporary variable and an error check, and so can be used as
|
||||
// [NewStringResponder] or [NewBytesResponder] in such context:
|
||||
//
|
||||
// httpmock.RegisterResponder(
|
||||
// "GET",
|
||||
// "/test/path",
|
||||
// httpmock.NewJsonResponderOrPanic(200, &MyBody),
|
||||
// )
|
||||
//
|
||||
// To pass the content of an existing file as body use [File] as in:
|
||||
//
|
||||
// httpmock.NewJsonResponderOrPanic(200, httpmock.File("body.json"))
|
||||
func NewJsonResponderOrPanic(status int, body any) Responder { // nolint: revive
|
||||
responder, err := NewJsonResponder(status, body)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return responder
|
||||
}
|
||||
|
||||
// NewXmlResponse creates an [*http.Response] with a body that is an
|
||||
// XML encoded representation of the given any. Also accepts an HTTP
|
||||
// status code.
|
||||
//
|
||||
// To pass the content of an existing file as body use [File] as in:
|
||||
//
|
||||
// httpmock.NewXmlResponse(200, httpmock.File("body.xml"))
|
||||
func NewXmlResponse(status int, body any) (*http.Response, error) { // nolint: revive
|
||||
var (
|
||||
encoded []byte
|
||||
err error
|
||||
)
|
||||
if f, ok := body.(File); ok {
|
||||
encoded, err = f.bytes()
|
||||
} else {
|
||||
encoded, err = xml.Marshal(body)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response := NewBytesResponse(status, encoded)
|
||||
response.Header.Set("Content-Type", "application/xml")
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// NewXmlResponder creates a [Responder] from a given body (as an
|
||||
// any that is encoded to XML) and status code.
|
||||
//
|
||||
// To pass the content of an existing file as body use [File] as in:
|
||||
//
|
||||
// httpmock.NewXmlResponder(200, httpmock.File("body.xml"))
|
||||
func NewXmlResponder(status int, body any) (Responder, error) { // nolint: revive
|
||||
resp, err := NewXmlResponse(status, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ResponderFromResponse(resp), nil
|
||||
}
|
||||
|
||||
// NewXmlResponderOrPanic is like [NewXmlResponder] but panics in case
|
||||
// of error.
|
||||
//
|
||||
// It simplifies the call of [RegisterResponder], avoiding the use of a
|
||||
// temporary variable and an error check, and so can be used as
|
||||
// [NewStringResponder] or [NewBytesResponder] in such context:
|
||||
//
|
||||
// httpmock.RegisterResponder(
|
||||
// "GET",
|
||||
// "/test/path",
|
||||
// httpmock.NewXmlResponderOrPanic(200, &MyBody),
|
||||
// )
|
||||
//
|
||||
// To pass the content of an existing file as body use [File] as in:
|
||||
//
|
||||
// httpmock.NewXmlResponderOrPanic(200, httpmock.File("body.xml"))
|
||||
func NewXmlResponderOrPanic(status int, body any) Responder { // nolint: revive
|
||||
responder, err := NewXmlResponder(status, body)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return responder
|
||||
}
|
||||
|
||||
// NewRespBodyFromString creates an [io.ReadCloser] from a string that
|
||||
// is suitable for use as an HTTP response body.
|
||||
//
|
||||
// To pass the content of an existing file as body use [File] as in:
|
||||
//
|
||||
// httpmock.NewRespBodyFromString(httpmock.File("body.txt").String())
|
||||
func NewRespBodyFromString(body string) io.ReadCloser {
|
||||
return &dummyReadCloser{orig: body}
|
||||
}
|
||||
|
||||
// NewRespBodyFromBytes creates an [io.ReadCloser] from a byte slice
|
||||
// that is suitable for use as an HTTP response body.
|
||||
//
|
||||
// To pass the content of an existing file as body use [File] as in:
|
||||
//
|
||||
// httpmock.NewRespBodyFromBytes(httpmock.File("body.txt").Bytes())
|
||||
func NewRespBodyFromBytes(body []byte) io.ReadCloser {
|
||||
return &dummyReadCloser{orig: body}
|
||||
}
|
||||
|
||||
type lenReadSeeker interface {
|
||||
io.ReadSeeker
|
||||
Len() int
|
||||
}
|
||||
|
||||
type dummyReadCloser struct {
|
||||
orig any // string, []byte or io.ReadCloser
|
||||
body lenReadSeeker // instantiated on demand from orig
|
||||
}
|
||||
|
||||
// copy returns a new instance resetting d.body to nil.
|
||||
func (d *dummyReadCloser) copy() *dummyReadCloser {
|
||||
return &dummyReadCloser{orig: d.orig}
|
||||
}
|
||||
|
||||
// setup ensures d.body is correctly initialized.
|
||||
func (d *dummyReadCloser) setup() {
|
||||
if d.body == nil {
|
||||
switch body := d.orig.(type) {
|
||||
case string:
|
||||
d.body = strings.NewReader(body)
|
||||
case []byte:
|
||||
d.body = bytes.NewReader(body)
|
||||
case io.ReadCloser:
|
||||
var buf bytes.Buffer
|
||||
io.Copy(&buf, body) //nolint: errcheck
|
||||
body.Close()
|
||||
d.body = bytes.NewReader(buf.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dummyReadCloser) Read(p []byte) (n int, err error) {
|
||||
d.setup()
|
||||
return d.body.Read(p)
|
||||
}
|
||||
|
||||
func (d *dummyReadCloser) Close() error {
|
||||
d.setup()
|
||||
d.body.Seek(0, io.SeekEnd) // nolint: errcheck
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dummyReadCloser) Len() int {
|
||||
d.setup()
|
||||
return d.body.Len()
|
||||
}
|
||||
1867
vendor/github.com/jarcoal/httpmock/transport.go
generated
vendored
Normal file
1867
vendor/github.com/jarcoal/httpmock/transport.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
38
vendor/github.com/pyke369/golang-support/rcache/rcache.go
generated
vendored
Normal file
38
vendor/github.com/pyke369/golang-support/rcache/rcache.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
package rcache
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
var (
|
||||
cache map[string]*regexp.Regexp = map[string]*regexp.Regexp{}
|
||||
hit int64
|
||||
miss int64
|
||||
lock sync.RWMutex
|
||||
)
|
||||
|
||||
func Get(expression string) *regexp.Regexp {
|
||||
lock.RLock()
|
||||
if cache[expression] != nil {
|
||||
atomic.AddInt64(&hit, 1)
|
||||
defer lock.RUnlock()
|
||||
return cache[expression]
|
||||
}
|
||||
atomic.AddInt64(&miss, 1)
|
||||
lock.RUnlock()
|
||||
if regex, err := regexp.Compile(expression); err == nil {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
cache[expression] = regex
|
||||
return cache[expression]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Stats() (int, int64, int64) {
|
||||
lock.RLock()
|
||||
defer lock.RUnlock()
|
||||
return len(cache), atomic.LoadInt64(&hit), atomic.LoadInt64(&miss)
|
||||
}
|
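// Example (illustrative sketch; the pattern and input are made up): Get
// compiles a pattern on first use, serves it from the cache afterwards and
// returns nil for an invalid pattern; Stats reports the cache size and the
// hit/miss counters.
//
//    if m := rcache.Get(`^[a-z]+-\d+$`); m != nil && m.MatchString("job-42") {
//        entries, hits, misses := rcache.Stats()
//        _, _, _ = entries, hits, misses // cache size and counters so far
//    }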
||||
675
vendor/github.com/pyke369/golang-support/uconfig/uconfig.go
generated
vendored
Normal file
675
vendor/github.com/pyke369/golang-support/uconfig/uconfig.go
generated
vendored
Normal file
@@ -0,0 +1,675 @@
|
||||
package uconfig
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/pyke369/golang-support/rcache"
|
||||
)
|
||||
|
||||
type UConfig struct {
|
||||
input string
|
||||
config any
|
||||
hash string
|
||||
cache map[string]any
|
||||
separator string
|
||||
cacheLock sync.RWMutex
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
type replacer struct {
|
||||
search *regexp.Regexp
|
||||
replace string
|
||||
loop bool
|
||||
}
|
||||
|
||||
var (
|
||||
escaped = "{}[],#/*;:= "
|
||||
unescaper = regexp.MustCompile(`@\d+@`) // match escaped characters (to reverse previous escaping)
|
||||
expander = regexp.MustCompile(`{{([<=|@&!\-\+_])\s*([^{}]*?)\s*}}`) // match external content macros
|
||||
sizer = regexp.MustCompile(`^(\d+(?:\.\d*)?)\s*([KMGTP]?)(B?)$`) // match size value
|
||||
duration1 = regexp.MustCompile(`(\d+)(Y|MO|D|H|MN|S|MS|US)?`) // match duration value form1 (free)
|
||||
duration2 = regexp.MustCompile(`^(?:(\d+):)?(\d{2}):(\d{2})(?:\.(\d{1,3}))?$`) // match duration value form2 (timecode)
|
||||
replacers = []replacer{
|
||||
replacer{regexp.MustCompile("(?m)^(.*?)(?:#|//).*?$"), `$1`, false}, // remove # and // commented portions
|
||||
replacer{regexp.MustCompile(`/\*[^\*]*\*/`), ``, true}, // remove /* */ commented portions
|
||||
replacer{regexp.MustCompile(`(?m)^\s+`), ``, false}, // trim leading spaces
|
||||
replacer{regexp.MustCompile(`(?m)\s+$`), ``, false}, // trim trailing spaces
|
||||
replacer{regexp.MustCompile(`(?m)^(\S+)\s+([^{}\[\],;:=]+);$`), "$1 = $2;", false}, // add missing key-value separators
|
||||
replacer{regexp.MustCompile(`(?m);$`), `,`, false}, // replace ; line terminators by ,
|
||||
replacer{regexp.MustCompile(`(\S+?)\s*[:=]`), `$1:`, false}, // replace = key-value separators by :
|
||||
replacer{regexp.MustCompile(`([}\]])(\s*)([^,}\]\s])`), `$1,$2$3`, false}, // add missing objects/arrays , separators
|
||||
replacer{regexp.MustCompile("(?m)(^[^:]+:.+?[^,])$"), `$1,`, false}, // add missing values trailing , seperators
|
||||
replacer{regexp.MustCompile(`(?m)(^[^\[{][^:\[{]+)\s+([\[{])`), `$1:$2`, true}, // add missing key-(object/array-)value separator
|
||||
replacer{regexp.MustCompile(`(?m)^([^":{}\[\]]+)`), `"$1"`, false}, // add missing quotes around keys
|
||||
replacer{regexp.MustCompile("([:,\\[\\s]+)([^\",\\[\\]{}\\s\n\r]+?)(\\s*[,\\]}])"), `$1"$2"$3`, false}, // add missing quotes around values
|
||||
replacer{regexp.MustCompile("\"[\r\n]"), "\",\n", false}, // add still issing objects/arrays , separators
|
||||
replacer{regexp.MustCompile(`"\s*(.+?)\s*"`), `"$1"`, false}, // trim leading and trailing spaces in quoted strings
|
||||
replacer{regexp.MustCompile(`,+(\s*[}\]])`), `$1`, false}, // remove objects/arrays last element extra ,
|
||||
}
|
||||
)
|
||||
|
||||
func escape(input string) string {
|
||||
var output []byte
|
||||
|
||||
instring := false
|
||||
for index := 0; index < len(input); index++ {
|
||||
if input[index:index+1] == `"` && (index == 0 || input[index-1:index] != `\`) {
|
||||
instring = !instring
|
||||
}
|
||||
if instring {
|
||||
offset := strings.IndexAny(escaped, input[index:index+1])
|
||||
if offset >= 0 {
|
||||
output = append(output, []byte(fmt.Sprintf("@%02d@", offset))...)
|
||||
} else {
|
||||
output = append(output, input[index:index+1]...)
|
||||
}
|
||||
} else {
|
||||
output = append(output, input[index:index+1]...)
|
||||
}
|
||||
}
|
||||
return string(output)
|
||||
}
|
||||
|
||||
func unescape(input string) string {
|
||||
return unescaper.ReplaceAllStringFunc(input, func(a string) string {
|
||||
offset, _ := strconv.Atoi(a[1:3])
|
||||
if offset < len(escaped) {
|
||||
return escaped[offset : offset+1]
|
||||
}
|
||||
return ""
|
||||
})
|
||||
}
|
||||
|
||||
func reduce(input any) {
|
||||
if input != nil {
|
||||
switch reflect.TypeOf(input).Kind() {
|
||||
case reflect.Map:
|
||||
for key := range input.(map[string]any) {
|
||||
var parts []string
|
||||
for _, value := range strings.Split(key, " ") {
|
||||
if value != "" {
|
||||
parts = append(parts, value)
|
||||
}
|
||||
}
|
||||
if len(parts) > 1 {
|
||||
if input.(map[string]any)[parts[0]] == nil || reflect.TypeOf(input.(map[string]any)[parts[0]]).Kind() != reflect.Map {
|
||||
input.(map[string]any)[parts[0]] = make(map[string]any)
|
||||
}
|
||||
input.(map[string]any)[parts[0]].(map[string]any)[parts[1]] = input.(map[string]any)[key]
|
||||
delete(input.(map[string]any), key)
|
||||
}
|
||||
}
|
||||
for _, value := range input.(map[string]any) {
|
||||
reduce(value)
|
||||
}
|
||||
case reflect.Slice:
|
||||
for index := 0; index < len(input.([]any)); index++ {
|
||||
reduce(input.([]any)[index])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func New(input string, inline ...bool) (*UConfig, error) {
|
||||
config := &UConfig{
|
||||
input: input,
|
||||
config: nil,
|
||||
separator: ".",
|
||||
}
|
||||
return config, config.Load(input, inline...)
|
||||
}
|
||||
|
||||
func (c *UConfig) Reload(inline ...bool) error {
|
||||
return c.Load(c.input, inline...)
|
||||
}
|
||||
|
||||
func (c *UConfig) SetSeparator(separator string) {
|
||||
c.separator = separator
|
||||
}
|
||||
|
||||
func (c *UConfig) Load(input string, inline ...bool) error {
|
||||
c.Lock()
|
||||
base, _ := os.Getwd()
|
||||
content := fmt.Sprintf("/*base:%s*/\n", base)
|
||||
if len(inline) > 0 && inline[0] {
|
||||
content += input
|
||||
} else {
|
||||
content += fmt.Sprintf("{{<%s}}", input)
|
||||
}
|
||||
|
||||
for {
|
||||
indexes := expander.FindStringSubmatchIndex(content)
|
||||
if indexes == nil {
|
||||
content = escape(content)
|
||||
for index := 0; index < len(replacers); index++ {
|
||||
mcontent := ""
|
||||
for mcontent != content {
|
||||
mcontent = content
|
||||
content = replacers[index].search.ReplaceAllString(content, replacers[index].replace)
|
||||
if !replacers[index].loop {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
content = unescape(strings.Trim(content, " \n,"))
|
||||
break
|
||||
}
|
||||
|
||||
expanded := ""
|
||||
arguments := strings.Split(content[indexes[4]:indexes[5]], " ")
|
||||
if start := strings.LastIndex(content[0:indexes[2]], "/*base:"); start >= 0 {
|
||||
if end := strings.Index(content[start+7:indexes[2]], "*/"); end > 0 {
|
||||
base = content[start+7 : start+7+end]
|
||||
}
|
||||
}
|
||||
switch content[indexes[2]:indexes[3]] {
|
||||
case "<":
|
||||
if arguments[0][0:1] != "/" {
|
||||
arguments[0] = fmt.Sprintf("%s/%s", base, arguments[0])
|
||||
}
|
||||
nbase := ""
|
||||
if elements, err := filepath.Glob(arguments[0]); err == nil {
|
||||
for _, element := range elements {
|
||||
if mcontent, err := os.ReadFile(element); err == nil {
|
||||
nbase = filepath.Dir(element)
|
||||
expanded += string(mcontent)
|
||||
}
|
||||
}
|
||||
}
|
||||
if nbase != "" && strings.Contains(expanded, "\n") {
|
||||
expanded = fmt.Sprintf("/*base:%s*/\n%s\n/*base:%s*/\n", nbase, expanded, base)
|
||||
}
|
||||
case "=":
|
||||
if elements, err := filepath.Glob(arguments[0]); err == nil {
|
||||
for _, element := range elements {
|
||||
if mcontent, err := os.ReadFile(element); err == nil {
|
||||
for _, line := range strings.Split(string(mcontent), "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if (len(line) >= 1 && line[0] != '#') || (len(line) >= 2 && line[0] != '/' && line[1] != '/') {
|
||||
expanded += fmt.Sprintf("\"%s\"\n", line)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case "|":
|
||||
if arguments[0][0:1] != "/" {
|
||||
arguments[0] = fmt.Sprintf("%s/%s", base, arguments[0])
|
||||
}
|
||||
nbase := ""
|
||||
if elements, err := filepath.Glob(arguments[0]); err == nil {
|
||||
for _, element := range elements {
|
||||
if element[0:1] != "/" {
|
||||
element = fmt.Sprintf("%s/%s", base, element)
|
||||
}
|
||||
if mcontent, err := exec.Command(element, strings.Join(arguments[1:], " ")).Output(); err == nil {
|
||||
nbase = filepath.Dir(element)
|
||||
expanded += string(mcontent)
|
||||
}
|
||||
}
|
||||
}
|
||||
if nbase != "" && strings.Contains(expanded, "\n") {
|
||||
expanded = fmt.Sprintf("/*base:%s*/\n%s\n/*base:%s*/\n", nbase, expanded, base)
|
||||
}
|
||||
case "@":
|
||||
requester := http.Client{
|
||||
Timeout: time.Duration(5 * time.Second),
|
||||
}
|
||||
if response, err := requester.Get(arguments[0]); err == nil {
|
||||
if (response.StatusCode / 100) == 2 {
|
||||
if mcontent, err := io.ReadAll(response.Body); err == nil {
|
||||
expanded += string(mcontent)
|
||||
}
|
||||
}
|
||||
}
|
||||
case "&":
|
||||
expanded += os.Getenv(arguments[0])
|
||||
case "!":
|
||||
if matcher := rcache.Get(fmt.Sprintf(`(?i)^--?(no-?)?(?:%s)(?:(=)(.+))?$`, arguments[0])); matcher != nil {
|
||||
for index := 1; index < len(os.Args); index++ {
|
||||
option := os.Args[index]
|
||||
if option == "--" {
|
||||
break
|
||||
}
|
||||
if captures := matcher.FindStringSubmatch(option); captures != nil {
|
||||
if captures[2] == "=" {
|
||||
expanded = captures[3]
|
||||
} else {
|
||||
if index == len(os.Args)-1 || strings.HasPrefix(os.Args[index+1], "-") {
|
||||
expanded = "true"
|
||||
if captures[1] != "" {
|
||||
expanded = "false"
|
||||
}
|
||||
} else {
|
||||
expanded = os.Args[index+1]
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
case "-":
|
||||
if index := strings.Index(os.Args[0], "-"); index >= 0 {
|
||||
expanded = strings.ToLower(os.Args[0][index+1:])
|
||||
}
|
||||
case "+":
|
||||
if arguments[0][0:1] != "/" {
|
||||
arguments[0] = fmt.Sprintf("%s/%s", base, arguments[0])
|
||||
}
|
||||
if elements, err := filepath.Glob(arguments[0]); err == nil {
|
||||
for _, element := range elements {
|
||||
element = filepath.Base(element)
|
||||
expanded += fmt.Sprintf("%s ", strings.TrimSuffix(element, filepath.Ext(element)))
|
||||
}
|
||||
}
|
||||
expanded = strings.TrimSpace(expanded)
|
||||
case "_":
|
||||
expanded = filepath.Base(input)
|
||||
}
|
||||
content = fmt.Sprintf("%s%s%s", content[0:indexes[0]], expanded, content[indexes[1]:])
|
||||
}
|
||||
|
||||
var config any
|
||||
if err := json.Unmarshal([]byte(content), &config); err != nil {
|
||||
if err := json.Unmarshal([]byte("{"+content+"}"), &config); err != nil {
|
||||
if syntax, ok := err.(*json.SyntaxError); ok && syntax.Offset < int64(len(content)) {
|
||||
if start := strings.LastIndex(content[:syntax.Offset], "\n") + 1; start >= 0 {
|
||||
line := strings.Count(content[:start], "\n") + 1
|
||||
c.Unlock()
|
||||
return fmt.Errorf("uconfig: %s at line %d near %s", syntax, line, content[start:syntax.Offset])
|
||||
}
|
||||
}
|
||||
c.Unlock()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
c.config = config
|
||||
c.hash = fmt.Sprintf("%x", sha1.Sum([]byte(content)))
|
||||
c.cache = map[string]any{}
|
||||
reduce(c.config)
|
||||
c.Unlock()
|
||||
return nil
|
||||
}
|
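// Example (illustrative sketch; keys and values are made up, fmt is assumed
// to be imported by the caller): loading an inline configuration written in
// the relaxed syntax that Load normalizes to JSON before unmarshalling
// (unquoted keys, = separators and missing commas are accepted).
//
//    config, err := uconfig.New(`
//        server {
//            listen  = ":8080"
//            timeout = 30s
//        }
//    `, true)
//    if err == nil {
//        fmt.Println(config.GetString("server.listen", ":80"))  // expected ":8080"
//        fmt.Println(config.GetDuration("server.timeout", 5))   // expected 30s
//    }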
||||
|
||||
func (c *UConfig) Loaded() bool {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
return !(c.config == nil)
|
||||
}
|
||||
|
||||
func (c *UConfig) Hash() string {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
return c.hash
|
||||
}
|
||||
|
||||
func (c *UConfig) String() string {
|
||||
if c.config != nil {
|
||||
config := &bytes.Buffer{}
|
||||
encoder := json.NewEncoder(config)
|
||||
encoder.SetEscapeHTML(false)
|
||||
encoder.SetIndent("", " ")
|
||||
if encoder.Encode(c.config) == nil {
|
||||
return config.String()
|
||||
}
|
||||
}
|
||||
return "{}"
|
||||
}
|
||||
|
||||
func (c *UConfig) Path(input ...string) string {
|
||||
total, count, separator := 0, 0, len(c.separator)
|
||||
for _, value := range input {
|
||||
if length := len(value); length > 0 {
|
||||
total += length
|
||||
count++
|
||||
}
|
||||
}
|
||||
if total == 0 {
|
||||
return ""
|
||||
}
|
||||
total += (count - 1) * separator
|
||||
result, offset := make([]byte, total), 0
|
||||
for _, value := range input {
|
||||
if length := len(value); length > 0 {
|
||||
if offset > 0 {
|
||||
copy(result[offset:], c.separator)
|
||||
offset += separator
|
||||
}
|
||||
copy(result[offset:], value)
|
||||
offset += length
|
||||
}
|
||||
}
|
||||
return unsafe.String(unsafe.SliceData(result), total)
|
||||
}
|
||||
|
||||
func (c *UConfig) Base(path string) string {
|
||||
parts := strings.Split(path, c.separator)
|
||||
return parts[len(parts)-1]
|
||||
}
|
||||
|
||||
func (c *UConfig) GetPaths(path string) []string {
|
||||
var (
|
||||
current any = c.config
|
||||
paths []string = []string{}
|
||||
)
|
||||
|
||||
c.RLock()
|
||||
prefix := ""
|
||||
if current == nil || path == "" {
|
||||
c.RUnlock()
|
||||
return paths
|
||||
}
|
||||
c.cacheLock.RLock()
|
||||
if c.cache[path] != nil {
|
||||
if paths, ok := c.cache[path].([]string); ok {
|
||||
c.cacheLock.RUnlock()
|
||||
c.RUnlock()
|
||||
return paths
|
||||
}
|
||||
}
|
||||
c.cacheLock.RUnlock()
|
||||
if path != "" {
|
||||
prefix = c.separator
|
||||
for _, part := range strings.Split(path, c.separator) {
|
||||
kind := reflect.TypeOf(current).Kind()
|
||||
index, err := strconv.Atoi(part)
|
||||
if (kind == reflect.Slice && (err != nil || index < 0 || index >= len(current.([]any)))) || (kind != reflect.Slice && kind != reflect.Map) {
|
||||
c.cacheLock.Lock()
|
||||
c.cache[path] = paths
|
||||
c.cacheLock.Unlock()
|
||||
c.RUnlock()
|
||||
return paths
|
||||
}
|
||||
if kind == reflect.Slice {
|
||||
current = current.([]any)[index]
|
||||
} else {
|
||||
if current = current.(map[string]any)[strings.TrimSpace(part)]; current == nil {
|
||||
c.cacheLock.Lock()
|
||||
c.cache[path] = paths
|
||||
c.cacheLock.Unlock()
|
||||
c.RUnlock()
|
||||
return paths
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
switch reflect.TypeOf(current).Kind() {
|
||||
case reflect.Slice:
|
||||
for index := 0; index < len(current.([]any)); index++ {
|
||||
paths = append(paths, fmt.Sprintf("%s%s%d", path, prefix, index))
|
||||
}
|
||||
case reflect.Map:
|
||||
for key := range current.(map[string]any) {
|
||||
paths = append(paths, fmt.Sprintf("%s%s%s", path, prefix, key))
|
||||
}
|
||||
}
|
||||
c.cacheLock.Lock()
|
||||
c.cache[path] = paths
|
||||
c.cacheLock.Unlock()
|
||||
c.RUnlock()
|
||||
return paths
|
||||
}
|
||||
|
||||
func (c *UConfig) value(path string) (string, error) {
|
||||
var current any = c.config
|
||||
|
||||
c.RLock()
|
||||
if current == nil || path == "" {
|
||||
c.RUnlock()
|
||||
return "", errors.New(`uconfig: invalid parameter`)
|
||||
}
|
||||
c.cacheLock.RLock()
|
||||
if c.cache[path] != nil {
|
||||
if current, ok := c.cache[path].(bool); ok && !current {
|
||||
c.cacheLock.RUnlock()
|
||||
c.RUnlock()
|
||||
return "", errors.New(`uconfig: invalid path`)
|
||||
}
|
||||
if current, ok := c.cache[path].(string); ok {
|
||||
c.cacheLock.RUnlock()
|
||||
c.RUnlock()
|
||||
return current, nil
|
||||
}
|
||||
}
|
||||
c.cacheLock.RUnlock()
|
||||
for _, part := range strings.Split(path, c.separator) {
|
||||
kind := reflect.TypeOf(current).Kind()
|
||||
index, err := strconv.Atoi(part)
|
||||
if (kind == reflect.Slice && (err != nil || index < 0 || index >= len(current.([]any)))) || (kind != reflect.Slice && kind != reflect.Map) {
|
||||
c.cacheLock.Lock()
|
||||
c.cache[path] = false
|
||||
c.cacheLock.Unlock()
|
||||
c.RUnlock()
|
||||
return "", errors.New(`uconfig: invalid path`)
|
||||
}
|
||||
if kind == reflect.Slice {
|
||||
current = current.([]any)[index]
|
||||
} else {
|
||||
if current = current.(map[string]any)[strings.TrimSpace(part)]; current == nil {
|
||||
c.cacheLock.Lock()
|
||||
c.cache[path] = false
|
||||
c.cacheLock.Unlock()
|
||||
c.RUnlock()
|
||||
return "", errors.New(`uconfig: invalid path`)
|
||||
}
|
||||
}
|
||||
}
|
||||
if reflect.TypeOf(current).Kind() == reflect.String {
|
||||
c.cacheLock.Lock()
|
||||
c.cache[path] = current.(string)
|
||||
c.cacheLock.Unlock()
|
||||
c.RUnlock()
|
||||
return current.(string), nil
|
||||
}
|
||||
c.cacheLock.Lock()
|
||||
c.cache[path] = false
|
||||
c.cacheLock.Unlock()
|
||||
c.RUnlock()
|
||||
return "", errors.New(`uconfig: invalid path`)
|
||||
}
|
||||
|
||||
func (c *UConfig) GetBoolean(path string, fallback ...bool) bool {
|
||||
if value, err := c.value(path); err == nil {
|
||||
if value = strings.ToLower(strings.TrimSpace(value)); value == "1" || value == "on" || value == "yes" || value == "true" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
if len(fallback) > 0 {
|
||||
return fallback[0]
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *UConfig) GetStrings(path string) []string {
|
||||
list := []string{}
|
||||
for _, path := range c.GetPaths(path) {
|
||||
list = append(list, c.GetString(path, ""))
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
func (c *UConfig) GetString(path string, fallback ...string) string {
|
||||
if value, err := c.value(path); err == nil {
|
||||
return value
|
||||
}
|
||||
if len(fallback) > 0 {
|
||||
return fallback[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func (c *UConfig) GetStringMatch(path string, fallback, match string) string {
|
||||
return c.GetStringMatchCaptures(path, fallback, match)[0]
|
||||
}
|
||||
func (c *UConfig) GetStringMatchCaptures(path string, fallback, match string) []string {
|
||||
value, err := c.value(path)
|
||||
if err != nil {
|
||||
return []string{fallback}
|
||||
}
|
||||
if match != "" {
|
||||
if matcher := rcache.Get(match); matcher != nil {
|
||||
if matches := matcher.FindStringSubmatch(value); matches != nil {
|
||||
return matches
|
||||
} else {
|
||||
return []string{fallback}
|
||||
}
|
||||
} else {
|
||||
return []string{fallback}
|
||||
}
|
||||
}
|
||||
return []string{value}
|
||||
}
|
||||
|
||||
func (c *UConfig) GetInteger(path string, fallback int64) int64 {
|
||||
return c.GetIntegerBounds(path, fallback, math.MinInt64, math.MaxInt64)
|
||||
}
|
||||
func (c *UConfig) GetIntegerBounds(path string, fallback, min, max int64) int64 {
|
||||
value, err := c.value(path)
|
||||
if err != nil {
|
||||
return fallback
|
||||
}
|
||||
nvalue, err := strconv.ParseInt(strings.TrimSpace(value), 10, 64)
|
||||
if err != nil {
|
||||
return fallback
|
||||
}
|
||||
if nvalue < min {
|
||||
nvalue = min
|
||||
}
|
||||
if nvalue > max {
|
||||
nvalue = max
|
||||
}
|
||||
return nvalue
|
||||
}
|
||||
|
||||
func (c *UConfig) GetFloat(path string, fallback float64) float64 {
|
||||
return c.GetFloatBounds(path, fallback, -math.MaxFloat64, math.MaxFloat64)
|
||||
}
|
||||
func (c *UConfig) GetFloatBounds(path string, fallback, min, max float64) float64 {
|
||||
value, err := c.value(path)
|
||||
if err != nil {
|
||||
return fallback
|
||||
}
|
||||
nvalue, err := strconv.ParseFloat(strings.TrimSpace(value), 64)
|
||||
if err != nil {
|
||||
return fallback
|
||||
}
|
||||
return math.Max(math.Min(nvalue, max), min)
|
||||
}
|
||||
|
||||
func (c *UConfig) GetSize(path string, fallback int64) int64 {
|
||||
return c.GetSizeBounds(path, fallback, math.MinInt64, math.MaxInt64)
|
||||
}
|
||||
func (c *UConfig) GetSizeBounds(path string, fallback, min, max int64) int64 {
|
||||
value, err := c.value(path)
|
||||
if err != nil {
|
||||
return fallback
|
||||
}
|
||||
nvalue := int64(0)
|
||||
if matches := sizer.FindStringSubmatch(strings.TrimSpace(strings.ToUpper(value))); matches != nil {
|
||||
fvalue, err := strconv.ParseFloat(matches[1], 64)
|
||||
if err != nil {
|
||||
return fallback
|
||||
}
|
||||
scale := float64(1000)
|
||||
if matches[3] == "B" {
|
||||
scale = float64(1024)
|
||||
}
|
||||
nvalue = int64(fvalue * math.Pow(scale, float64(strings.Index("_KMGTP", matches[2]))))
|
||||
} else {
|
||||
return fallback
|
||||
}
|
||||
if nvalue < min {
|
||||
nvalue = min
|
||||
}
|
||||
if nvalue > max {
|
||||
nvalue = max
|
||||
}
|
||||
return nvalue
|
||||
}
|
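// Example (illustrative sketch; paths and stored values are made up): size
// values combine a number, an optional K/M/G/T/P multiplier (powers of 1000)
// and an optional trailing B switching the multiplier to powers of 1024.
//
//    config.GetSize("cache.memory", 0) // "64MB" -> 64 * 1024 * 1024
//    config.GetSize("quota.daily", 0)  // "1.5G" -> 1500000000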
||||
|
||||
func (c *UConfig) GetDuration(path string, fallback float64) time.Duration {
|
||||
return c.GetDurationBounds(path, fallback, 0, math.MaxFloat64)
|
||||
}
|
||||
func (c *UConfig) GetDurationBounds(path string, fallback, min, max float64) time.Duration {
|
||||
value, err := c.value(path)
|
||||
if err != nil {
|
||||
return time.Duration(fallback * float64(time.Second))
|
||||
}
|
||||
nvalue := float64(0.0)
|
||||
if matches := duration1.FindAllStringSubmatch(strings.TrimSpace(strings.ToUpper(value)), -1); matches != nil {
|
||||
for index := 0; index < len(matches); index++ {
|
||||
if uvalue, err := strconv.ParseFloat(matches[index][1], 64); err == nil {
|
||||
switch matches[index][2] {
|
||||
case "Y":
|
||||
nvalue += uvalue * 86400 * 365.256
|
||||
case "MO":
|
||||
nvalue += uvalue * 86400 * 365.256 / 12
|
||||
case "D":
|
||||
nvalue += uvalue * 86400
|
||||
case "H":
|
||||
nvalue += uvalue * 3600
|
||||
case "MN":
|
||||
nvalue += uvalue * 60
|
||||
case "S":
|
||||
nvalue += uvalue
|
||||
case "":
|
||||
nvalue += uvalue
|
||||
case "MS":
|
||||
nvalue += uvalue / 1000
|
||||
case "US":
|
||||
nvalue += uvalue / 1000000
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if matches := duration2.FindStringSubmatch(strings.TrimSpace(value)); matches != nil {
|
||||
hours, _ := strconv.ParseFloat(matches[1], 64)
|
||||
minutes, _ := strconv.ParseFloat(matches[2], 64)
|
||||
seconds, _ := strconv.ParseFloat(matches[3], 64)
|
||||
milliseconds, _ := strconv.ParseFloat(matches[4], 64)
|
||||
nvalue = (hours * 3600) + (math.Min(minutes, 59) * 60) + math.Min(seconds, 59) + (milliseconds / 1000)
|
||||
}
|
||||
return time.Duration(math.Max(math.Min(nvalue, max), min) * float64(time.Second))
|
||||
}
|
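// Example (illustrative sketch; paths and stored values are made up):
// durations accept free-form units (Y, MO, D, H, MN, S, MS, US) as well as an
// [HH:]MM:SS[.mmm] timecode.
//
//    config.GetDuration("poll.interval", 1) // "1mn30s"   -> 90s
//    config.GetDuration("job.deadline", 60) // "01:30:00" -> 1h30m0s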
||||
func Seconds(input time.Duration) float64 {
|
||||
return float64(input) / float64(time.Second)
|
||||
}
|
||||
|
||||
func Args() (args []string) {
|
||||
for index := 1; index < len(os.Args); index++ {
|
||||
option := os.Args[index]
|
||||
if args == nil {
|
||||
if option[0] == '-' {
|
||||
if option != "-" && option != "--" && !strings.Contains(option, "=") && index < len(os.Args)-1 && os.Args[index+1][0] != '-' {
|
||||
index++
|
||||
}
|
||||
} else {
|
||||
args = []string{}
|
||||
}
|
||||
}
|
||||
if args != nil {
|
||||
args = append(args, option)
|
||||
} else if option == "--" {
|
||||
args = []string{}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
27
vendor/golang.org/x/crypto/LICENSE
generated
vendored
Normal file
27
vendor/golang.org/x/crypto/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
22
vendor/golang.org/x/crypto/PATENTS
generated
vendored
Normal file
22
vendor/golang.org/x/crypto/PATENTS
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
||||
536
vendor/golang.org/x/crypto/cast5/cast5.go
generated
vendored
Normal file
536
vendor/golang.org/x/crypto/cast5/cast5.go
generated
vendored
Normal file
@@ -0,0 +1,536 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cast5 implements CAST5, as defined in RFC 2144.
|
||||
//
|
||||
// CAST5 is a legacy cipher and its short block size makes it vulnerable to
|
||||
// birthday bound attacks (see https://sweet32.info). It should only be used
|
||||
// where compatibility with legacy systems, not security, is the goal.
|
||||
//
|
||||
// Deprecated: any new system should use AES (from crypto/aes, if necessary in
|
||||
// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
|
||||
// golang.org/x/crypto/chacha20poly1305).
|
||||
package cast5 // import "golang.org/x/crypto/cast5"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
const BlockSize = 8
|
||||
const KeySize = 16
|
||||
|
||||
type Cipher struct {
|
||||
masking [16]uint32
|
||||
rotate [16]uint8
|
||||
}
|
||||
|
||||
func NewCipher(key []byte) (c *Cipher, err error) {
|
||||
if len(key) != KeySize {
|
||||
return nil, errors.New("CAST5: keys must be 16 bytes")
|
||||
}
|
||||
|
||||
c = new(Cipher)
|
||||
c.keySchedule(key)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cipher) BlockSize() int {
|
||||
return BlockSize
|
||||
}
|
||||
|
||||
func (c *Cipher) Encrypt(dst, src []byte) {
|
||||
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
||||
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
|
||||
|
||||
l, r = r, l^f1(r, c.masking[0], c.rotate[0])
|
||||
l, r = r, l^f2(r, c.masking[1], c.rotate[1])
|
||||
l, r = r, l^f3(r, c.masking[2], c.rotate[2])
|
||||
l, r = r, l^f1(r, c.masking[3], c.rotate[3])
|
||||
|
||||
l, r = r, l^f2(r, c.masking[4], c.rotate[4])
|
||||
l, r = r, l^f3(r, c.masking[5], c.rotate[5])
|
||||
l, r = r, l^f1(r, c.masking[6], c.rotate[6])
|
||||
l, r = r, l^f2(r, c.masking[7], c.rotate[7])
|
||||
|
||||
l, r = r, l^f3(r, c.masking[8], c.rotate[8])
|
||||
l, r = r, l^f1(r, c.masking[9], c.rotate[9])
|
||||
l, r = r, l^f2(r, c.masking[10], c.rotate[10])
|
||||
l, r = r, l^f3(r, c.masking[11], c.rotate[11])
|
||||
|
||||
l, r = r, l^f1(r, c.masking[12], c.rotate[12])
|
||||
l, r = r, l^f2(r, c.masking[13], c.rotate[13])
|
||||
l, r = r, l^f3(r, c.masking[14], c.rotate[14])
|
||||
l, r = r, l^f1(r, c.masking[15], c.rotate[15])
|
||||
|
||||
dst[0] = uint8(r >> 24)
|
||||
dst[1] = uint8(r >> 16)
|
||||
dst[2] = uint8(r >> 8)
|
||||
dst[3] = uint8(r)
|
||||
dst[4] = uint8(l >> 24)
|
||||
dst[5] = uint8(l >> 16)
|
||||
dst[6] = uint8(l >> 8)
|
||||
dst[7] = uint8(l)
|
||||
}
|
||||
|
||||
func (c *Cipher) Decrypt(dst, src []byte) {
|
||||
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
||||
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
|
||||
|
||||
l, r = r, l^f1(r, c.masking[15], c.rotate[15])
|
||||
l, r = r, l^f3(r, c.masking[14], c.rotate[14])
|
||||
l, r = r, l^f2(r, c.masking[13], c.rotate[13])
|
||||
l, r = r, l^f1(r, c.masking[12], c.rotate[12])
|
||||
|
||||
l, r = r, l^f3(r, c.masking[11], c.rotate[11])
|
||||
l, r = r, l^f2(r, c.masking[10], c.rotate[10])
|
||||
l, r = r, l^f1(r, c.masking[9], c.rotate[9])
|
||||
l, r = r, l^f3(r, c.masking[8], c.rotate[8])
|
||||
|
||||
l, r = r, l^f2(r, c.masking[7], c.rotate[7])
|
||||
l, r = r, l^f1(r, c.masking[6], c.rotate[6])
|
||||
l, r = r, l^f3(r, c.masking[5], c.rotate[5])
|
||||
l, r = r, l^f2(r, c.masking[4], c.rotate[4])
|
||||
|
||||
l, r = r, l^f1(r, c.masking[3], c.rotate[3])
|
||||
l, r = r, l^f3(r, c.masking[2], c.rotate[2])
|
||||
l, r = r, l^f2(r, c.masking[1], c.rotate[1])
|
||||
l, r = r, l^f1(r, c.masking[0], c.rotate[0])
|
||||
|
||||
dst[0] = uint8(r >> 24)
|
||||
dst[1] = uint8(r >> 16)
|
||||
dst[2] = uint8(r >> 8)
|
||||
dst[3] = uint8(r)
|
||||
dst[4] = uint8(l >> 24)
|
||||
dst[5] = uint8(l >> 16)
|
||||
dst[6] = uint8(l >> 8)
|
||||
dst[7] = uint8(l)
|
||||
}
|
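// Example (illustrative sketch; the key and plaintext are made up): CAST5
// works on 8-byte blocks with a 16-byte key, and Encrypt/Decrypt are inverses
// on a single block.
//
//    key := []byte("0123456789abcdef") // exactly KeySize (16) bytes
//    c, err := cast5.NewCipher(key)
//    if err != nil {
//        panic(err)
//    }
//    src := []byte("8 bytes!") // exactly BlockSize (8) bytes
//    dst := make([]byte, cast5.BlockSize)
//    c.Encrypt(dst, src)
//    plain := make([]byte, cast5.BlockSize)
//    c.Decrypt(plain, dst) // plain now equals src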
||||
|
||||
type keyScheduleA [4][7]uint8
|
||||
type keyScheduleB [4][5]uint8
|
||||
|
||||
// keyScheduleRound contains the magic values for a round of the key schedule.
|
||||
// The keyScheduleA deals with the lines like:
|
||||
// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]
|
||||
// Conceptually, both x and z are in the same array, x first. The first
|
||||
// element describes which word of this array gets written to and the
|
||||
// second, which word gets read. So, for the line above, it's "4, 0", because
|
||||
// it's writing to the first word of z, which, being after x, is word 4, and
|
||||
// reading from the first word of x: word 0.
|
||||
//
|
||||
// Next are the indexes into the S-boxes. Now the array is treated as bytes. So
|
||||
// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear
|
||||
// that it's z that we're indexing.
|
||||
//
|
||||
// keyScheduleB deals with lines like:
|
||||
// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2]
|
||||
// "K1" is ignored because key words are always written in order. So the five
|
||||
// elements are the S-box indexes. They use the same form as in keyScheduleA,
|
||||
// above.
|
||||
|
||||
type keyScheduleRound struct{}
|
||||
type keySchedule []keyScheduleRound
|
||||
|
||||
var schedule = []struct {
|
||||
a keyScheduleA
|
||||
b keyScheduleB
|
||||
}{
|
||||
{
|
||||
keyScheduleA{
|
||||
{4, 0, 0xd, 0xf, 0xc, 0xe, 0x8},
|
||||
{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
|
||||
{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
|
||||
{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
|
||||
},
|
||||
keyScheduleB{
|
||||
{16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2},
|
||||
{16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6},
|
||||
{16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9},
|
||||
{16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc},
|
||||
},
|
||||
},
|
||||
{
|
||||
keyScheduleA{
|
||||
{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
|
||||
{1, 4, 0, 2, 1, 3, 16 + 2},
|
||||
{2, 5, 7, 6, 5, 4, 16 + 1},
|
||||
{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
|
||||
},
|
||||
keyScheduleB{
|
||||
{3, 2, 0xc, 0xd, 8},
|
||||
{1, 0, 0xe, 0xf, 0xd},
|
||||
{7, 6, 8, 9, 3},
|
||||
{5, 4, 0xa, 0xb, 7},
|
||||
},
|
||||
},
|
||||
{
|
||||
keyScheduleA{
|
||||
{4, 0, 0xd, 0xf, 0xc, 0xe, 8},
|
||||
{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
|
||||
{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
|
||||
{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
|
||||
},
|
||||
keyScheduleB{
|
||||
{16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9},
|
||||
{16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc},
|
||||
{16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2},
|
||||
{16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6},
|
||||
},
|
||||
},
|
||||
{
|
||||
keyScheduleA{
|
||||
{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
|
||||
{1, 4, 0, 2, 1, 3, 16 + 2},
|
||||
{2, 5, 7, 6, 5, 4, 16 + 1},
|
||||
{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
|
||||
},
|
||||
keyScheduleB{
|
||||
{8, 9, 7, 6, 3},
|
||||
{0xa, 0xb, 5, 4, 7},
|
||||
{0xc, 0xd, 3, 2, 8},
|
||||
{0xe, 0xf, 1, 0, 0xd},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func (c *Cipher) keySchedule(in []byte) {
|
||||
var t [8]uint32
|
||||
var k [32]uint32
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
j := i * 4
|
||||
t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3])
|
||||
}
|
||||
|
||||
x := []byte{6, 7, 4, 5}
|
||||
ki := 0
|
||||
|
||||
for half := 0; half < 2; half++ {
|
||||
for _, round := range schedule {
|
||||
for j := 0; j < 4; j++ {
|
||||
var a [7]uint8
|
||||
copy(a[:], round.a[j][:])
|
||||
w := t[a[1]]
|
||||
w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff]
|
||||
w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff]
|
||||
w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff]
|
||||
w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff]
|
||||
w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff]
|
||||
t[a[0]] = w
|
||||
}
|
||||
|
||||
for j := 0; j < 4; j++ {
|
||||
var b [5]uint8
|
||||
copy(b[:], round.b[j][:])
|
||||
w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff]
|
||||
w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff]
|
||||
w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff]
|
||||
w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff]
|
||||
w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff]
|
||||
k[ki] = w
|
||||
ki++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 16; i++ {
|
||||
c.masking[i] = k[i]
|
||||
c.rotate[i] = uint8(k[16+i] & 0x1f)
|
||||
}
|
||||
}
|
||||
|
||||
// These are the three 'f' functions. See RFC 2144, section 2.2.
|
||||
func f1(d, m uint32, r uint8) uint32 {
|
||||
t := m + d
|
||||
I := bits.RotateLeft32(t, int(r))
|
||||
return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff]
|
||||
}
|
||||
|
||||
func f2(d, m uint32, r uint8) uint32 {
|
||||
t := m ^ d
|
||||
I := bits.RotateLeft32(t, int(r))
|
||||
return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff]
|
||||
}
|
||||
|
||||
func f3(d, m uint32, r uint8) uint32 {
|
||||
t := m - d
|
||||
I := bits.RotateLeft32(t, int(r))
|
||||
return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff]
|
||||
}
|
||||
|
||||
var sBox = [8][256]uint32{
|
||||
{
|
||||
0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949,
|
||||
0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
|
||||
0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
|
||||
0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0,
|
||||
0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
|
||||
0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
|
||||
0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d,
|
||||
0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
|
||||
0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe,
|
||||
0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
|
||||
0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167,
|
||||
0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291,
|
||||
0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779,
|
||||
0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
|
||||
0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
|
||||
0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d,
|
||||
0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5,
|
||||
0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324,
|
||||
0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
|
||||
0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
|
||||
0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d,
|
||||
0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96,
|
||||
0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
|
||||
0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
|
||||
0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
|
||||
0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6,
|
||||
0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
|
||||
0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872,
|
||||
0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c,
|
||||
0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e,
|
||||
0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
|
||||
0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf,
|
||||
},
|
||||
{
|
||||
0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651,
|
||||
0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
|
||||
0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
|
||||
0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
|
||||
0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b,
|
||||
0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359,
|
||||
0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b,
|
||||
0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
|
||||
0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
|
||||
0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb,
|
||||
0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
|
||||
0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860,
|
||||
0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
|
||||
0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
|
||||
0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
|
||||
0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
|
||||
0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c,
|
||||
0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13,
|
||||
0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f,
|
||||
0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
|
||||
0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6,
|
||||
0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58,
|
||||
0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
|
||||
0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
|
||||
0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6,
|
||||
0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
|
||||
0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6,
|
||||
0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f,
|
||||
0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
|
||||
0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
|
||||
0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9,
|
||||
0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1,
|
||||
},
|
||||
{
|
||||
0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90,
|
||||
0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5,
|
||||
0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e,
|
||||
0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240,
|
||||
0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
|
||||
0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
|
||||
0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71,
|
||||
0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
|
||||
0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
|
||||
0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15,
|
||||
0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2,
|
||||
0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176,
|
||||
0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148,
|
||||
0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
|
||||
0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
|
||||
0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e,
|
||||
0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
|
||||
0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f,
|
||||
0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a,
|
||||
0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b,
|
||||
0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
|
||||
0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
|
||||
0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
|
||||
0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536,
|
||||
0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
|
||||
0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
|
||||
0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69,
|
||||
0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
|
||||
0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49,
|
||||
0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d,
|
||||
0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a,
|
||||
0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783,
|
||||
},
|
||||
{
|
||||
0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1,
|
||||
0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf,
|
||||
0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
|
||||
0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121,
|
||||
0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
|
||||
0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
|
||||
0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb,
|
||||
0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5,
|
||||
0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d,
|
||||
0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6,
|
||||
0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23,
|
||||
0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003,
|
||||
0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
|
||||
0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119,
|
||||
0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
|
||||
0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a,
|
||||
0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
|
||||
0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df,
|
||||
0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
|
||||
0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
|
||||
0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
|
||||
0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
|
||||
0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
|
||||
0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
|
||||
0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
|
||||
0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919,
|
||||
0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
|
||||
0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
|
||||
0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab,
|
||||
0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04,
|
||||
0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282,
|
||||
0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2,
|
||||
},
|
||||
{
|
||||
0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f,
|
||||
0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a,
|
||||
0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff,
|
||||
0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02,
|
||||
0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a,
|
||||
0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7,
|
||||
0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9,
|
||||
0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981,
|
||||
0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774,
|
||||
0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655,
|
||||
0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2,
|
||||
0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910,
|
||||
0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1,
|
||||
0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da,
|
||||
0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049,
|
||||
0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f,
|
||||
0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba,
|
||||
0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be,
|
||||
0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3,
|
||||
0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840,
|
||||
0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4,
|
||||
0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2,
|
||||
0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7,
|
||||
0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5,
|
||||
0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e,
|
||||
0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e,
|
||||
0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801,
|
||||
0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad,
|
||||
0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0,
|
||||
0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20,
|
||||
0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8,
|
||||
0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4,
|
||||
},
|
||||
{
|
||||
0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac,
|
||||
0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138,
|
||||
0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367,
|
||||
0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98,
|
||||
0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072,
|
||||
0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3,
|
||||
0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd,
|
||||
0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8,
|
||||
0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9,
|
||||
0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54,
|
||||
0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387,
|
||||
0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc,
|
||||
0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf,
|
||||
0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf,
|
||||
0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f,
|
||||
0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289,
|
||||
0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950,
|
||||
0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f,
|
||||
0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b,
|
||||
0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be,
|
||||
0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13,
|
||||
0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976,
|
||||
0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0,
|
||||
0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891,
|
||||
0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da,
|
||||
0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc,
|
||||
0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084,
|
||||
0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25,
|
||||
0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121,
|
||||
0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5,
|
||||
0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd,
|
||||
0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f,
|
||||
},
|
||||
{
|
||||
0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f,
|
||||
0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de,
|
||||
0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43,
|
||||
0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19,
|
||||
0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2,
|
||||
0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516,
|
||||
0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88,
|
||||
0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816,
|
||||
0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756,
|
||||
0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a,
|
||||
0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264,
|
||||
0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688,
|
||||
0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28,
|
||||
0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3,
|
||||
0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7,
|
||||
0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06,
|
||||
0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033,
|
||||
0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a,
|
||||
0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566,
|
||||
0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509,
|
||||
0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962,
|
||||
0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e,
|
||||
0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c,
|
||||
0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c,
|
||||
0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285,
|
||||
0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301,
|
||||
0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be,
|
||||
0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767,
|
||||
0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647,
|
||||
0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914,
|
||||
0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c,
|
||||
0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3,
|
||||
},
|
||||
{
|
||||
0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5,
|
||||
0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc,
|
||||
0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd,
|
||||
0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d,
|
||||
0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2,
|
||||
0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862,
|
||||
0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc,
|
||||
0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c,
|
||||
0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e,
|
||||
0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039,
|
||||
0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8,
|
||||
0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42,
|
||||
0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5,
|
||||
0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472,
|
||||
0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225,
|
||||
0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c,
|
||||
0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb,
|
||||
0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054,
|
||||
0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70,
|
||||
0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc,
|
||||
0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c,
|
||||
0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3,
|
||||
0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4,
|
||||
0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101,
|
||||
0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f,
|
||||
0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e,
|
||||
0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a,
|
||||
0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c,
|
||||
0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384,
|
||||
0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c,
|
||||
0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82,
|
||||
0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e,
|
||||
},
|
||||
}
|
||||
122
vendor/golang.org/x/crypto/md4/md4.go
generated
vendored
Normal file
122
vendor/golang.org/x/crypto/md4/md4.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package md4 implements the MD4 hash algorithm as defined in RFC 1320.
|
||||
//
|
||||
// Deprecated: MD4 is cryptographically broken and should only be used
|
||||
// where compatibility with legacy systems, not security, is the goal. Instead,
|
||||
// use a secure hash like SHA-256 (from crypto/sha256).
|
||||
package md4 // import "golang.org/x/crypto/md4"
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"hash"
|
||||
)
|
||||
|
||||
func init() {
|
||||
crypto.RegisterHash(crypto.MD4, New)
|
||||
}
|
||||
|
||||
// The size of an MD4 checksum in bytes.
|
||||
const Size = 16
|
||||
|
||||
// The blocksize of MD4 in bytes.
|
||||
const BlockSize = 64
|
||||
|
||||
const (
|
||||
_Chunk = 64
|
||||
_Init0 = 0x67452301
|
||||
_Init1 = 0xEFCDAB89
|
||||
_Init2 = 0x98BADCFE
|
||||
_Init3 = 0x10325476
|
||||
)
|
||||
|
||||
// digest represents the partial evaluation of a checksum.
|
||||
type digest struct {
|
||||
s [4]uint32
|
||||
x [_Chunk]byte
|
||||
nx int
|
||||
len uint64
|
||||
}
|
||||
|
||||
func (d *digest) Reset() {
|
||||
d.s[0] = _Init0
|
||||
d.s[1] = _Init1
|
||||
d.s[2] = _Init2
|
||||
d.s[3] = _Init3
|
||||
d.nx = 0
|
||||
d.len = 0
|
||||
}
|
||||
|
||||
// New returns a new hash.Hash computing the MD4 checksum.
|
||||
func New() hash.Hash {
|
||||
d := new(digest)
|
||||
d.Reset()
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *digest) Size() int { return Size }
|
||||
|
||||
func (d *digest) BlockSize() int { return BlockSize }
|
||||
|
||||
func (d *digest) Write(p []byte) (nn int, err error) {
|
||||
nn = len(p)
|
||||
d.len += uint64(nn)
|
||||
if d.nx > 0 {
|
||||
n := len(p)
|
||||
if n > _Chunk-d.nx {
|
||||
n = _Chunk - d.nx
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
d.x[d.nx+i] = p[i]
|
||||
}
|
||||
d.nx += n
|
||||
if d.nx == _Chunk {
|
||||
_Block(d, d.x[0:])
|
||||
d.nx = 0
|
||||
}
|
||||
p = p[n:]
|
||||
}
|
||||
n := _Block(d, p)
|
||||
p = p[n:]
|
||||
if len(p) > 0 {
|
||||
d.nx = copy(d.x[:], p)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d0 *digest) Sum(in []byte) []byte {
|
||||
// Make a copy of d0, so that caller can keep writing and summing.
|
||||
d := new(digest)
|
||||
*d = *d0
|
||||
|
||||
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
|
||||
len := d.len
|
||||
var tmp [64]byte
|
||||
tmp[0] = 0x80
|
||||
if len%64 < 56 {
|
||||
d.Write(tmp[0 : 56-len%64])
|
||||
} else {
|
||||
d.Write(tmp[0 : 64+56-len%64])
|
||||
}
|
||||
|
||||
// Length in bits.
|
||||
len <<= 3
|
||||
for i := uint(0); i < 8; i++ {
|
||||
tmp[i] = byte(len >> (8 * i))
|
||||
}
|
||||
d.Write(tmp[0:8])
|
||||
|
||||
if d.nx != 0 {
|
||||
panic("d.nx != 0")
|
||||
}
|
||||
|
||||
for _, s := range d.s {
|
||||
in = append(in, byte(s>>0))
|
||||
in = append(in, byte(s>>8))
|
||||
in = append(in, byte(s>>16))
|
||||
in = append(in, byte(s>>24))
|
||||
}
|
||||
return in
|
||||
}
|
||||
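The md4 package above exposes only the standard hash.Hash interface. A minimal usage sketch follows; it is not part of the vendored file, and the sample input string is an arbitrary choice for illustration.

```
package main

import (
	"fmt"
	"io"

	"golang.org/x/crypto/md4"
)

func main() {
	// md4.New returns a hash.Hash, so the usual streaming pattern applies.
	h := md4.New()
	io.WriteString(h, "hello, md4")
	fmt.Printf("%x\n", h.Sum(nil)) // 16-byte digest, hex-encoded
}
```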
91
vendor/golang.org/x/crypto/md4/md4block.go
generated
vendored
Normal file
91
vendor/golang.org/x/crypto/md4/md4block.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// MD4 block step.
|
||||
// In its own file so that a faster assembly or C version
|
||||
// can be substituted easily.
|
||||
|
||||
package md4
|
||||
|
||||
import "math/bits"
|
||||
|
||||
var shift1 = []int{3, 7, 11, 19}
|
||||
var shift2 = []int{3, 5, 9, 13}
|
||||
var shift3 = []int{3, 9, 11, 15}
|
||||
|
||||
var xIndex2 = []uint{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15}
|
||||
var xIndex3 = []uint{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15}
|
||||
|
||||
func _Block(dig *digest, p []byte) int {
|
||||
a := dig.s[0]
|
||||
b := dig.s[1]
|
||||
c := dig.s[2]
|
||||
d := dig.s[3]
|
||||
n := 0
|
||||
var X [16]uint32
|
||||
for len(p) >= _Chunk {
|
||||
aa, bb, cc, dd := a, b, c, d
|
||||
|
||||
j := 0
|
||||
for i := 0; i < 16; i++ {
|
||||
X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
|
||||
j += 4
|
||||
}
|
||||
|
||||
// If this needs to be made faster in the future,
|
||||
// the usual trick is to unroll each of these
|
||||
// loops by a factor of 4; that lets you replace
|
||||
// the shift[] lookups with constants and,
|
||||
// with suitable variable renaming in each
|
||||
// unrolled body, delete the a, b, c, d = d, a, b, c
|
||||
// (or you can let the optimizer do the renaming).
|
||||
//
|
||||
// The index variables are uint so that % by a power
|
||||
// of two can be optimized easily by a compiler.
|
||||
|
||||
// Round 1.
|
||||
for i := uint(0); i < 16; i++ {
|
||||
x := i
|
||||
s := shift1[i%4]
|
||||
f := ((c ^ d) & b) ^ d
|
||||
a += f + X[x]
|
||||
a = bits.RotateLeft32(a, s)
|
||||
a, b, c, d = d, a, b, c
|
||||
}
|
||||
|
||||
// Round 2.
|
||||
for i := uint(0); i < 16; i++ {
|
||||
x := xIndex2[i]
|
||||
s := shift2[i%4]
|
||||
g := (b & c) | (b & d) | (c & d)
|
||||
a += g + X[x] + 0x5a827999
|
||||
a = bits.RotateLeft32(a, s)
|
||||
a, b, c, d = d, a, b, c
|
||||
}
|
||||
|
||||
// Round 3.
|
||||
for i := uint(0); i < 16; i++ {
|
||||
x := xIndex3[i]
|
||||
s := shift3[i%4]
|
||||
h := b ^ c ^ d
|
||||
a += h + X[x] + 0x6ed9eba1
|
||||
a = bits.RotateLeft32(a, s)
|
||||
a, b, c, d = d, a, b, c
|
||||
}
|
||||
|
||||
a += aa
|
||||
b += bb
|
||||
c += cc
|
||||
d += dd
|
||||
|
||||
p = p[_Chunk:]
|
||||
n += _Chunk
|
||||
}
|
||||
|
||||
dig.s[0] = a
|
||||
dig.s[1] = b
|
||||
dig.s[2] = c
|
||||
dig.s[3] = d
|
||||
return n
|
||||
}
|
||||
232
vendor/golang.org/x/crypto/openpgp/armor/armor.go
generated
vendored
Normal file
232
vendor/golang.org/x/crypto/openpgp/armor/armor.go
generated
vendored
Normal file
@@ -0,0 +1,232 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
|
||||
// very similar to PEM except that it has an additional CRC checksum.
|
||||
//
|
||||
// Deprecated: this package is unmaintained except for security fixes. New
|
||||
// applications should consider a more focused, modern alternative to OpenPGP
|
||||
// for their specific task. If you are required to interoperate with OpenPGP
|
||||
// systems and need a maintained package, consider a community fork.
|
||||
// See https://golang.org/issue/44226.
|
||||
package armor // import "golang.org/x/crypto/openpgp/armor"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"golang.org/x/crypto/openpgp/errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// A Block represents an OpenPGP armored structure.
|
||||
//
|
||||
// The encoded form is:
|
||||
//
|
||||
// -----BEGIN Type-----
|
||||
// Headers
|
||||
//
|
||||
// base64-encoded Bytes
|
||||
// '=' base64 encoded checksum
|
||||
// -----END Type-----
|
||||
//
|
||||
// where Headers is a possibly empty sequence of Key: Value lines.
|
||||
//
|
||||
// Since the armored data can be very large, this package presents a streaming
|
||||
// interface.
|
||||
type Block struct {
|
||||
Type string // The type, taken from the preamble (e.g. "PGP SIGNATURE").
|
||||
Header map[string]string // Optional headers.
|
||||
Body io.Reader // A Reader from which the contents can be read
|
||||
lReader lineReader
|
||||
oReader openpgpReader
|
||||
}
|
||||
|
||||
var ArmorCorrupt error = errors.StructuralError("armor invalid")
|
||||
|
||||
const crc24Init = 0xb704ce
|
||||
const crc24Poly = 0x1864cfb
|
||||
const crc24Mask = 0xffffff
|
||||
|
||||
// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1
|
||||
func crc24(crc uint32, d []byte) uint32 {
|
||||
for _, b := range d {
|
||||
crc ^= uint32(b) << 16
|
||||
for i := 0; i < 8; i++ {
|
||||
crc <<= 1
|
||||
if crc&0x1000000 != 0 {
|
||||
crc ^= crc24Poly
|
||||
}
|
||||
}
|
||||
}
|
||||
return crc
|
||||
}
|
||||
|
||||
var armorStart = []byte("-----BEGIN ")
|
||||
var armorEnd = []byte("-----END ")
|
||||
var armorEndOfLine = []byte("-----")
|
||||
|
||||
// lineReader wraps a line based reader. It watches for the end of an armor
|
||||
// block and records the expected CRC value.
|
||||
type lineReader struct {
|
||||
in *bufio.Reader
|
||||
buf []byte
|
||||
eof bool
|
||||
crc uint32
|
||||
crcSet bool
|
||||
}
|
||||
|
||||
func (l *lineReader) Read(p []byte) (n int, err error) {
|
||||
if l.eof {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
if len(l.buf) > 0 {
|
||||
n = copy(p, l.buf)
|
||||
l.buf = l.buf[n:]
|
||||
return
|
||||
}
|
||||
|
||||
line, isPrefix, err := l.in.ReadLine()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if isPrefix {
|
||||
return 0, ArmorCorrupt
|
||||
}
|
||||
|
||||
if bytes.HasPrefix(line, armorEnd) {
|
||||
l.eof = true
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
if len(line) == 5 && line[0] == '=' {
|
||||
// This is the checksum line
|
||||
var expectedBytes [3]byte
|
||||
var m int
|
||||
m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:])
|
||||
if m != 3 || err != nil {
|
||||
return
|
||||
}
|
||||
l.crc = uint32(expectedBytes[0])<<16 |
|
||||
uint32(expectedBytes[1])<<8 |
|
||||
uint32(expectedBytes[2])
|
||||
|
||||
line, _, err = l.in.ReadLine()
|
||||
if err != nil && err != io.EOF {
|
||||
return
|
||||
}
|
||||
if !bytes.HasPrefix(line, armorEnd) {
|
||||
return 0, ArmorCorrupt
|
||||
}
|
||||
|
||||
l.eof = true
|
||||
l.crcSet = true
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
if len(line) > 96 {
|
||||
return 0, ArmorCorrupt
|
||||
}
|
||||
|
||||
n = copy(p, line)
|
||||
bytesToSave := len(line) - n
|
||||
if bytesToSave > 0 {
|
||||
if cap(l.buf) < bytesToSave {
|
||||
l.buf = make([]byte, 0, bytesToSave)
|
||||
}
|
||||
l.buf = l.buf[0:bytesToSave]
|
||||
copy(l.buf, line[n:])
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// openpgpReader passes Read calls to the underlying base64 decoder, but keeps
|
||||
// a running CRC of the resulting data and checks the CRC against the value
|
||||
// found by the lineReader at EOF.
|
||||
type openpgpReader struct {
|
||||
lReader *lineReader
|
||||
b64Reader io.Reader
|
||||
currentCRC uint32
|
||||
}
|
||||
|
||||
func (r *openpgpReader) Read(p []byte) (n int, err error) {
|
||||
n, err = r.b64Reader.Read(p)
|
||||
r.currentCRC = crc24(r.currentCRC, p[:n])
|
||||
|
||||
if err == io.EOF && r.lReader.crcSet && r.lReader.crc != r.currentCRC&crc24Mask {
|
||||
return 0, ArmorCorrupt
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Decode reads a PGP armored block from the given Reader. It will ignore
|
||||
// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
|
||||
// given Reader is not usable after calling this function: an arbitrary amount
|
||||
// of data may have been read past the end of the block.
|
||||
func Decode(in io.Reader) (p *Block, err error) {
|
||||
r := bufio.NewReaderSize(in, 100)
|
||||
var line []byte
|
||||
ignoreNext := false
|
||||
|
||||
TryNextBlock:
|
||||
p = nil
|
||||
|
||||
// Skip leading garbage
|
||||
for {
|
||||
ignoreThis := ignoreNext
|
||||
line, ignoreNext, err = r.ReadLine()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if ignoreNext || ignoreThis {
|
||||
continue
|
||||
}
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
p = new(Block)
|
||||
p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
|
||||
p.Header = make(map[string]string)
|
||||
nextIsContinuation := false
|
||||
var lastKey string
|
||||
|
||||
// Read headers
|
||||
for {
|
||||
isContinuation := nextIsContinuation
|
||||
line, nextIsContinuation, err = r.ReadLine()
|
||||
if err != nil {
|
||||
p = nil
|
||||
return
|
||||
}
|
||||
if isContinuation {
|
||||
p.Header[lastKey] += string(line)
|
||||
continue
|
||||
}
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
i := bytes.Index(line, []byte(": "))
|
||||
if i == -1 {
|
||||
goto TryNextBlock
|
||||
}
|
||||
lastKey = string(line[:i])
|
||||
p.Header[lastKey] = string(line[i+2:])
|
||||
}
|
||||
|
||||
p.lReader.in = r
|
||||
p.oReader.currentCRC = crc24Init
|
||||
p.oReader.lReader = &p.lReader
|
||||
p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
|
||||
p.Body = &p.oReader
|
||||
|
||||
return
|
||||
}
|
||||
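A hedged illustration of the Decode API above; reading from stdin is an assumption of the sketch, not something the package requires.

```
package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	// Decode the first armored block found on stdin.
	block, err := armor.Decode(os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, "no armored block found:", err)
		os.Exit(1)
	}
	fmt.Println("type:", block.Type)

	// The CRC-24 checksum is only verified once the body has been read to EOF.
	body, err := io.ReadAll(block.Body)
	if err != nil {
		fmt.Fprintln(os.Stderr, "corrupt armor:", err)
		os.Exit(1)
	}
	fmt.Printf("%d bytes of binary payload\n", len(body))
}
```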
161
vendor/golang.org/x/crypto/openpgp/armor/encode.go
generated
vendored
Normal file
161
vendor/golang.org/x/crypto/openpgp/armor/encode.go
generated
vendored
Normal file
@@ -0,0 +1,161 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package armor
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"io"
|
||||
)
|
||||
|
||||
var armorHeaderSep = []byte(": ")
|
||||
var blockEnd = []byte("\n=")
|
||||
var newline = []byte("\n")
|
||||
var armorEndOfLineOut = []byte("-----\n")
|
||||
|
||||
// writeSlices writes its arguments to the given Writer.
|
||||
func writeSlices(out io.Writer, slices ...[]byte) (err error) {
|
||||
for _, s := range slices {
|
||||
_, err = out.Write(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// lineBreaker breaks data across several lines, all of the same byte length
|
||||
// (except possibly the last). Lines are broken with a single '\n'.
|
||||
type lineBreaker struct {
|
||||
lineLength int
|
||||
line []byte
|
||||
used int
|
||||
out io.Writer
|
||||
haveWritten bool
|
||||
}
|
||||
|
||||
func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
|
||||
return &lineBreaker{
|
||||
lineLength: lineLength,
|
||||
line: make([]byte, lineLength),
|
||||
used: 0,
|
||||
out: out,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lineBreaker) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
|
||||
if n == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if l.used == 0 && l.haveWritten {
|
||||
_, err = l.out.Write([]byte{'\n'})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if l.used+len(b) < l.lineLength {
|
||||
l.used += copy(l.line[l.used:], b)
|
||||
return
|
||||
}
|
||||
|
||||
l.haveWritten = true
|
||||
_, err = l.out.Write(l.line[0:l.used])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
excess := l.lineLength - l.used
|
||||
l.used = 0
|
||||
|
||||
_, err = l.out.Write(b[0:excess])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = l.Write(b[excess:])
|
||||
return
|
||||
}
|
||||
|
||||
func (l *lineBreaker) Close() (err error) {
|
||||
if l.used > 0 {
|
||||
_, err = l.out.Write(l.line[0:l.used])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// encoding keeps track of a running CRC24 over the data which has been written
|
||||
// to it and outputs an OpenPGP checksum when closed, followed by an armor
|
||||
// trailer.
|
||||
//
|
||||
// It's built into a stack of io.Writers:
|
||||
//
|
||||
// encoding -> base64 encoder -> lineBreaker -> out
|
||||
type encoding struct {
|
||||
out io.Writer
|
||||
breaker *lineBreaker
|
||||
b64 io.WriteCloser
|
||||
crc uint32
|
||||
blockType []byte
|
||||
}
|
||||
|
||||
func (e *encoding) Write(data []byte) (n int, err error) {
|
||||
e.crc = crc24(e.crc, data)
|
||||
return e.b64.Write(data)
|
||||
}
|
||||
|
||||
func (e *encoding) Close() (err error) {
|
||||
err = e.b64.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
e.breaker.Close()
|
||||
|
||||
var checksumBytes [3]byte
|
||||
checksumBytes[0] = byte(e.crc >> 16)
|
||||
checksumBytes[1] = byte(e.crc >> 8)
|
||||
checksumBytes[2] = byte(e.crc)
|
||||
|
||||
var b64ChecksumBytes [4]byte
|
||||
base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:])
|
||||
|
||||
return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine)
|
||||
}
|
||||
|
||||
// Encode returns a WriteCloser which will encode the data written to it in
|
||||
// OpenPGP armor.
|
||||
func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
|
||||
bType := []byte(blockType)
|
||||
err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for k, v := range headers {
|
||||
err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
_, err = out.Write(newline)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
e := &encoding{
|
||||
out: out,
|
||||
breaker: newLineBreaker(out, 64),
|
||||
crc: crc24Init,
|
||||
blockType: bType,
|
||||
}
|
||||
e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker)
|
||||
return e, nil
|
||||
}
|
||||
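A round-trip sketch combining Encode above with Decode from the previous file; the "PGP MESSAGE" block type and the Comment header are arbitrary example values.

```
package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer

	// Wrap a few bytes in an armored "PGP MESSAGE" block.
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Comment": "example"})
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello armor")); err != nil {
		panic(err)
	}
	// Close flushes the base64 encoder and appends the CRC-24 line and END trailer.
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())

	// Decode it again; reading the body to EOF checks the CRC.
	block, err := armor.Decode(&buf)
	if err != nil {
		panic(err)
	}
	payload, err := io.ReadAll(block.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s: %q\n", block.Type, payload)
}
```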
59
vendor/golang.org/x/crypto/openpgp/canonical_text.go
generated
vendored
Normal file
59
vendor/golang.org/x/crypto/openpgp/canonical_text.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package openpgp
|
||||
|
||||
import "hash"
|
||||
|
||||
// NewCanonicalTextHash reformats text written to it into the canonical
|
||||
// form and then applies the hash h. See RFC 4880, section 5.2.1.
|
||||
func NewCanonicalTextHash(h hash.Hash) hash.Hash {
|
||||
return &canonicalTextHash{h, 0}
|
||||
}
|
||||
|
||||
type canonicalTextHash struct {
|
||||
h hash.Hash
|
||||
s int
|
||||
}
|
||||
|
||||
var newline = []byte{'\r', '\n'}
|
||||
|
||||
func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
|
||||
start := 0
|
||||
|
||||
for i, c := range buf {
|
||||
switch cth.s {
|
||||
case 0:
|
||||
if c == '\r' {
|
||||
cth.s = 1
|
||||
} else if c == '\n' {
|
||||
cth.h.Write(buf[start:i])
|
||||
cth.h.Write(newline)
|
||||
start = i + 1
|
||||
}
|
||||
case 1:
|
||||
cth.s = 0
|
||||
}
|
||||
}
|
||||
|
||||
cth.h.Write(buf[start:])
|
||||
return len(buf), nil
|
||||
}
|
||||
|
||||
func (cth *canonicalTextHash) Sum(in []byte) []byte {
|
||||
return cth.h.Sum(in)
|
||||
}
|
||||
|
||||
func (cth *canonicalTextHash) Reset() {
|
||||
cth.h.Reset()
|
||||
cth.s = 0
|
||||
}
|
||||
|
||||
func (cth *canonicalTextHash) Size() int {
|
||||
return cth.h.Size()
|
||||
}
|
||||
|
||||
func (cth *canonicalTextHash) BlockSize() int {
|
||||
return cth.h.BlockSize()
|
||||
}
|
||||
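A small sketch of what the canonical-text wrapper does: hashing LF-terminated text through it yields the same digest as hashing the CRLF form directly. SHA-256 is just a convenient example hash, not something this file prescribes.

```
package main

import (
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/openpgp"
)

func main() {
	// Line endings are normalized to CRLF before reaching the wrapped hash.
	canon := openpgp.NewCanonicalTextHash(sha256.New())
	canon.Write([]byte("first line\nsecond line\n"))

	plain := sha256.New()
	plain.Write([]byte("first line\r\nsecond line\r\n"))

	// The two digests are identical.
	fmt.Printf("%x\n%x\n", canon.Sum(nil), plain.Sum(nil))
}
```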
424
vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go
generated
vendored
Normal file
424
vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go
generated
vendored
Normal file
@@ -0,0 +1,424 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package clearsign generates and processes OpenPGP, clear-signed data. See
|
||||
// RFC 4880, section 7.
|
||||
//
|
||||
// Clearsigned messages are cryptographically signed, but the contents of the
|
||||
// message are kept in plaintext so that it can be read without special tools.
|
||||
//
|
||||
// Deprecated: this package is unmaintained except for security fixes. New
|
||||
// applications should consider a more focused, modern alternative to OpenPGP
|
||||
// for their specific task. If you are required to interoperate with OpenPGP
|
||||
// systems and need a maintained package, consider a community fork.
|
||||
// See https://golang.org/issue/44226.
|
||||
package clearsign // import "golang.org/x/crypto/openpgp/clearsign"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"net/textproto"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/openpgp/armor"
|
||||
"golang.org/x/crypto/openpgp/errors"
|
||||
"golang.org/x/crypto/openpgp/packet"
|
||||
)
|
||||
|
||||
// A Block represents a clearsigned message. A signature on a Block can
|
||||
// be checked by passing Bytes into openpgp.CheckDetachedSignature.
|
||||
type Block struct {
|
||||
Headers textproto.MIMEHeader // Optional unverified Hash headers
|
||||
Plaintext []byte // The original message text
|
||||
Bytes []byte // The signed message
|
||||
ArmoredSignature *armor.Block // The signature block
|
||||
}
|
||||
|
||||
// start is the marker which denotes the beginning of a clearsigned message.
|
||||
var start = []byte("\n-----BEGIN PGP SIGNED MESSAGE-----")
|
||||
|
||||
// dashEscape is prefixed to any lines that begin with a hyphen so that they
|
||||
// can't be confused with endText.
|
||||
var dashEscape = []byte("- ")
|
||||
|
||||
// endText is a marker which denotes the end of the message and the start of
|
||||
// an armored signature.
|
||||
var endText = []byte("-----BEGIN PGP SIGNATURE-----")
|
||||
|
||||
// end is a marker which denotes the end of the armored signature.
|
||||
var end = []byte("\n-----END PGP SIGNATURE-----")
|
||||
|
||||
var crlf = []byte("\r\n")
|
||||
var lf = byte('\n')
|
||||
|
||||
// getLine returns the first \r\n or \n delineated line from the given byte
|
||||
// array. The line does not include the \r\n or \n. The remainder of the byte
|
||||
// array (also not including the new line bytes) is also returned and this will
|
||||
// always be smaller than the original argument.
|
||||
func getLine(data []byte) (line, rest []byte) {
|
||||
i := bytes.Index(data, []byte{'\n'})
|
||||
var j int
|
||||
if i < 0 {
|
||||
i = len(data)
|
||||
j = i
|
||||
} else {
|
||||
j = i + 1
|
||||
if i > 0 && data[i-1] == '\r' {
|
||||
i--
|
||||
}
|
||||
}
|
||||
return data[0:i], data[j:]
|
||||
}
|
||||
|
||||
// Decode finds the first clearsigned message in data and returns it, as well as
|
||||
// the suffix of data which remains after the message. Any prefix data is
|
||||
// discarded.
|
||||
//
|
||||
// If no message is found, or if the message is invalid, Decode returns nil and
|
||||
// the whole data slice. The only allowed header type is Hash, and it is not
|
||||
// verified against the signature hash.
|
||||
func Decode(data []byte) (b *Block, rest []byte) {
|
||||
// start begins with a newline. However, at the very beginning of
|
||||
// the byte array, we'll accept the start string without it.
|
||||
rest = data
|
||||
if bytes.HasPrefix(data, start[1:]) {
|
||||
rest = rest[len(start)-1:]
|
||||
} else if i := bytes.Index(data, start); i >= 0 {
|
||||
rest = rest[i+len(start):]
|
||||
} else {
|
||||
return nil, data
|
||||
}
|
||||
|
||||
// Consume the start line and check it does not have a suffix.
|
||||
suffix, rest := getLine(rest)
|
||||
if len(suffix) != 0 {
|
||||
return nil, data
|
||||
}
|
||||
|
||||
var line []byte
|
||||
b = &Block{
|
||||
Headers: make(textproto.MIMEHeader),
|
||||
}
|
||||
|
||||
// Next come a series of header lines.
|
||||
for {
|
||||
// This loop terminates because getLine's second result is
|
||||
// always smaller than its argument.
|
||||
if len(rest) == 0 {
|
||||
return nil, data
|
||||
}
|
||||
// An empty line marks the end of the headers.
|
||||
if line, rest = getLine(rest); len(line) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Reject headers with control or Unicode characters.
|
||||
if i := bytes.IndexFunc(line, func(r rune) bool {
|
||||
return r < 0x20 || r > 0x7e
|
||||
}); i != -1 {
|
||||
return nil, data
|
||||
}
|
||||
|
||||
i := bytes.Index(line, []byte{':'})
|
||||
if i == -1 {
|
||||
return nil, data
|
||||
}
|
||||
|
||||
key, val := string(line[0:i]), string(line[i+1:])
|
||||
key = strings.TrimSpace(key)
|
||||
if key != "Hash" {
|
||||
return nil, data
|
||||
}
|
||||
val = strings.TrimSpace(val)
|
||||
b.Headers.Add(key, val)
|
||||
}
|
||||
|
||||
firstLine := true
|
||||
for {
|
||||
start := rest
|
||||
|
||||
line, rest = getLine(rest)
|
||||
if len(line) == 0 && len(rest) == 0 {
|
||||
// No armored data was found, so this isn't a complete message.
|
||||
return nil, data
|
||||
}
|
||||
if bytes.Equal(line, endText) {
|
||||
// Back up to the start of the line because armor expects to see the
|
||||
// header line.
|
||||
rest = start
|
||||
break
|
||||
}
|
||||
|
||||
// The final CRLF isn't included in the hash so we don't write it until
|
||||
// we've seen the next line.
|
||||
if firstLine {
|
||||
firstLine = false
|
||||
} else {
|
||||
b.Bytes = append(b.Bytes, crlf...)
|
||||
}
|
||||
|
||||
if bytes.HasPrefix(line, dashEscape) {
|
||||
line = line[2:]
|
||||
}
|
||||
line = bytes.TrimRight(line, " \t")
|
||||
b.Bytes = append(b.Bytes, line...)
|
||||
|
||||
b.Plaintext = append(b.Plaintext, line...)
|
||||
b.Plaintext = append(b.Plaintext, lf)
|
||||
}
|
||||
|
||||
// We want to find the extent of the armored data (including any newlines at
|
||||
// the end).
|
||||
i := bytes.Index(rest, end)
|
||||
if i == -1 {
|
||||
return nil, data
|
||||
}
|
||||
i += len(end)
|
||||
for i < len(rest) && (rest[i] == '\r' || rest[i] == '\n') {
|
||||
i++
|
||||
}
|
||||
armored := rest[:i]
|
||||
rest = rest[i:]
|
||||
|
||||
var err error
|
||||
b.ArmoredSignature, err = armor.Decode(bytes.NewBuffer(armored))
|
||||
if err != nil {
|
||||
return nil, data
|
||||
}
|
||||
|
||||
return b, rest
|
||||
}
|
||||
|
||||
// A dashEscaper is an io.WriteCloser which processes the body of a clear-signed
|
||||
// message. The clear-signed message is written to buffered and a hash, suitable
|
||||
// for signing, is maintained in h.
|
||||
//
|
||||
// When closed, an armored signature is created and written to complete the
|
||||
// message.
|
||||
type dashEscaper struct {
|
||||
buffered *bufio.Writer
|
||||
hashers []hash.Hash // one per key in privateKeys
|
||||
hashType crypto.Hash
|
||||
toHash io.Writer // writes to all the hashes in hashers
|
||||
|
||||
atBeginningOfLine bool
|
||||
isFirstLine bool
|
||||
|
||||
whitespace []byte
|
||||
byteBuf []byte // a one byte buffer to save allocations
|
||||
|
||||
privateKeys []*packet.PrivateKey
|
||||
config *packet.Config
|
||||
}
|
||||
|
||||
func (d *dashEscaper) Write(data []byte) (n int, err error) {
|
||||
for _, b := range data {
|
||||
d.byteBuf[0] = b
|
||||
|
||||
if d.atBeginningOfLine {
|
||||
// The final CRLF isn't included in the hash so we have to wait
|
||||
// until this point (the start of the next line) before writing it.
|
||||
if !d.isFirstLine {
|
||||
d.toHash.Write(crlf)
|
||||
}
|
||||
d.isFirstLine = false
|
||||
}
|
||||
|
||||
// Any whitespace at the end of the line has to be removed so we
|
||||
// buffer it until we find out whether there's more on this line.
|
||||
if b == ' ' || b == '\t' || b == '\r' {
|
||||
d.whitespace = append(d.whitespace, b)
|
||||
d.atBeginningOfLine = false
|
||||
continue
|
||||
}
|
||||
|
||||
if d.atBeginningOfLine {
|
||||
// At the beginning of a line, hyphens have to be escaped.
|
||||
if b == '-' {
|
||||
// The signature isn't calculated over the dash-escaped text so
|
||||
// the escape is only written to buffered.
|
||||
if _, err = d.buffered.Write(dashEscape); err != nil {
|
||||
return
|
||||
}
|
||||
d.toHash.Write(d.byteBuf)
|
||||
d.atBeginningOfLine = false
|
||||
} else if b == '\n' {
|
||||
// Nothing to do because we delay writing CRLF to the hash.
|
||||
} else {
|
||||
d.toHash.Write(d.byteBuf)
|
||||
d.atBeginningOfLine = false
|
||||
}
|
||||
if err = d.buffered.WriteByte(b); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if b == '\n' {
|
||||
// We got a raw \n. Drop any trailing whitespace and write a
|
||||
// CRLF.
|
||||
d.whitespace = d.whitespace[:0]
|
||||
// We delay writing CRLF to the hash until the start of the
|
||||
// next line.
|
||||
if err = d.buffered.WriteByte(b); err != nil {
|
||||
return
|
||||
}
|
||||
d.atBeginningOfLine = true
|
||||
} else {
|
||||
// Any buffered whitespace wasn't at the end of the line so
|
||||
// we need to write it out.
|
||||
if len(d.whitespace) > 0 {
|
||||
d.toHash.Write(d.whitespace)
|
||||
if _, err = d.buffered.Write(d.whitespace); err != nil {
|
||||
return
|
||||
}
|
||||
d.whitespace = d.whitespace[:0]
|
||||
}
|
||||
d.toHash.Write(d.byteBuf)
|
||||
if err = d.buffered.WriteByte(b); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
n = len(data)
|
||||
return
|
||||
}
|
||||
|
||||
func (d *dashEscaper) Close() (err error) {
|
||||
if !d.atBeginningOfLine {
|
||||
if err = d.buffered.WriteByte(lf); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
out, err := armor.Encode(d.buffered, "PGP SIGNATURE", nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
t := d.config.Now()
|
||||
for i, k := range d.privateKeys {
|
||||
sig := new(packet.Signature)
|
||||
sig.SigType = packet.SigTypeText
|
||||
sig.PubKeyAlgo = k.PubKeyAlgo
|
||||
sig.Hash = d.hashType
|
||||
sig.CreationTime = t
|
||||
sig.IssuerKeyId = &k.KeyId
|
||||
|
||||
if err = sig.Sign(d.hashers[i], k, d.config); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sig.Serialize(out); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err = out.Close(); err != nil {
|
||||
return
|
||||
}
|
||||
if err = d.buffered.Flush(); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Encode returns a WriteCloser which will clear-sign a message with privateKey
|
||||
// and write it to w. If config is nil, sensible defaults are used.
|
||||
func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) {
|
||||
return EncodeMulti(w, []*packet.PrivateKey{privateKey}, config)
|
||||
}
|
||||
|
||||
// EncodeMulti returns a WriteCloser which will clear-sign a message with all the
|
||||
// private keys indicated and write it to w. If config is nil, sensible defaults
|
||||
// are used.
|
||||
func EncodeMulti(w io.Writer, privateKeys []*packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) {
|
||||
for _, k := range privateKeys {
|
||||
if k.Encrypted {
|
||||
return nil, errors.InvalidArgumentError(fmt.Sprintf("signing key %s is encrypted", k.KeyIdString()))
|
||||
}
|
||||
}
|
||||
|
||||
hashType := config.Hash()
|
||||
name := nameOfHash(hashType)
|
||||
if len(name) == 0 {
|
||||
return nil, errors.UnsupportedError("unknown hash type: " + strconv.Itoa(int(hashType)))
|
||||
}
|
||||
|
||||
if !hashType.Available() {
|
||||
return nil, errors.UnsupportedError("unsupported hash type: " + strconv.Itoa(int(hashType)))
|
||||
}
|
||||
var hashers []hash.Hash
|
||||
var ws []io.Writer
|
||||
for range privateKeys {
|
||||
h := hashType.New()
|
||||
hashers = append(hashers, h)
|
||||
ws = append(ws, h)
|
||||
}
|
||||
toHash := io.MultiWriter(ws...)
|
||||
|
||||
buffered := bufio.NewWriter(w)
|
||||
// start has a \n at the beginning that we don't want here.
|
||||
if _, err = buffered.Write(start[1:]); err != nil {
|
||||
return
|
||||
}
|
||||
if err = buffered.WriteByte(lf); err != nil {
|
||||
return
|
||||
}
|
||||
if _, err = buffered.WriteString("Hash: "); err != nil {
|
||||
return
|
||||
}
|
||||
if _, err = buffered.WriteString(name); err != nil {
|
||||
return
|
||||
}
|
||||
if err = buffered.WriteByte(lf); err != nil {
|
||||
return
|
||||
}
|
||||
if err = buffered.WriteByte(lf); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
plaintext = &dashEscaper{
|
||||
buffered: buffered,
|
||||
hashers: hashers,
|
||||
hashType: hashType,
|
||||
toHash: toHash,
|
||||
|
||||
atBeginningOfLine: true,
|
||||
isFirstLine: true,
|
||||
|
||||
byteBuf: make([]byte, 1),
|
||||
|
||||
privateKeys: privateKeys,
|
||||
config: config,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// nameOfHash returns the OpenPGP name for the given hash, or the empty string
|
||||
// if the name isn't known. See RFC 4880, section 9.4.
|
||||
func nameOfHash(h crypto.Hash) string {
|
||||
switch h {
|
||||
case crypto.MD5:
|
||||
return "MD5"
|
||||
case crypto.SHA1:
|
||||
return "SHA1"
|
||||
case crypto.RIPEMD160:
|
||||
return "RIPEMD160"
|
||||
case crypto.SHA224:
|
||||
return "SHA224"
|
||||
case crypto.SHA256:
|
||||
return "SHA256"
|
||||
case crypto.SHA384:
|
||||
return "SHA384"
|
||||
case crypto.SHA512:
|
||||
return "SHA512"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
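A hedged sketch of the Decode side only. Reading the clearsigned message from stdin is an assumption of the example, and signature verification is left out because it needs a keyring.

```
package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/openpgp/clearsign"
)

func main() {
	data, err := io.ReadAll(os.Stdin)
	if err != nil {
		panic(err)
	}
	block, rest := clearsign.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no clearsigned message found")
		os.Exit(1)
	}
	fmt.Println("Hash headers:", block.Headers["Hash"])
	fmt.Printf("plaintext:\n%s", block.Plaintext)
	fmt.Printf("%d trailing byte(s) after the message\n", len(rest))
	// block.Bytes and block.ArmoredSignature.Body are what a verifier would
	// feed to openpgp.CheckDetachedSignature together with a keyring.
}
```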
130
vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
generated
vendored
Normal file
130
vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
generated
vendored
Normal file
@@ -0,0 +1,130 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
|
||||
// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
|
||||
// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
|
||||
// n. 4, 1985, pp. 469-472.
|
||||
//
|
||||
// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
|
||||
// unsuitable for other protocols. RSA should be used in preference in any
|
||||
// case.
|
||||
//
|
||||
// Deprecated: this package was only provided to support ElGamal encryption in
|
||||
// OpenPGP. The golang.org/x/crypto/openpgp package is now deprecated (see
|
||||
// https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has
|
||||
// compatibility and security issues (see https://eprint.iacr.org/2021/923).
|
||||
// Moreover, this package doesn't protect against side-channel attacks.
|
||||
package elgamal // import "golang.org/x/crypto/openpgp/elgamal"
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"io"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// PublicKey represents an ElGamal public key.
|
||||
type PublicKey struct {
|
||||
G, P, Y *big.Int
|
||||
}
|
||||
|
||||
// PrivateKey represents an ElGamal private key.
|
||||
type PrivateKey struct {
|
||||
PublicKey
|
||||
X *big.Int
|
||||
}
|
||||
|
||||
// Encrypt encrypts the given message to the given public key. The result is a
|
||||
// pair of integers. Errors can result from reading random, or because msg is
|
||||
// too large to be encrypted to the public key.
|
||||
func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
|
||||
pLen := (pub.P.BitLen() + 7) / 8
|
||||
if len(msg) > pLen-11 {
|
||||
err = errors.New("elgamal: message too long")
|
||||
return
|
||||
}
|
||||
|
||||
// EM = 0x02 || PS || 0x00 || M
|
||||
em := make([]byte, pLen-1)
|
||||
em[0] = 2
|
||||
ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
|
||||
err = nonZeroRandomBytes(ps, random)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
em[len(em)-len(msg)-1] = 0
|
||||
copy(mm, msg)
|
||||
|
||||
m := new(big.Int).SetBytes(em)
|
||||
|
||||
k, err := rand.Int(random, pub.P)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
c1 = new(big.Int).Exp(pub.G, k, pub.P)
|
||||
s := new(big.Int).Exp(pub.Y, k, pub.P)
|
||||
c2 = s.Mul(s, m)
|
||||
c2.Mod(c2, pub.P)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Decrypt takes two integers, resulting from an ElGamal encryption, and
|
||||
// returns the plaintext of the message. An error can result only if the
|
||||
// ciphertext is invalid. Users should keep in mind that this is a padding
|
||||
// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
|
||||
// be used to break the cryptosystem. See “Chosen Ciphertext Attacks
|
||||
// Against Protocols Based on the RSA Encryption Standard PKCS #1”, Daniel
|
||||
// Bleichenbacher, Advances in Cryptology (Crypto '98).
|
||||
func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
|
||||
s := new(big.Int).Exp(c1, priv.X, priv.P)
|
||||
if s.ModInverse(s, priv.P) == nil {
|
||||
return nil, errors.New("elgamal: invalid private key")
|
||||
}
|
||||
s.Mul(s, c2)
|
||||
s.Mod(s, priv.P)
|
||||
em := s.Bytes()
|
||||
|
||||
firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
|
||||
|
||||
// The remainder of the plaintext must be a string of non-zero random
|
||||
// octets, followed by a 0, followed by the message.
|
||||
// lookingForIndex: 1 iff we are still looking for the zero.
|
||||
// index: the offset of the first zero byte.
|
||||
var lookingForIndex, index int
|
||||
lookingForIndex = 1
|
||||
|
||||
for i := 1; i < len(em); i++ {
|
||||
equals0 := subtle.ConstantTimeByteEq(em[i], 0)
|
||||
index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
|
||||
lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
|
||||
}
|
||||
|
||||
if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
|
||||
return nil, errors.New("elgamal: decryption error")
|
||||
}
|
||||
return em[index+1:], nil
|
||||
}
|
||||
|
||||
// nonZeroRandomBytes fills the given slice with non-zero random octets.
|
||||
func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
|
||||
_, err = io.ReadFull(rand, s)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i := 0; i < len(s); i++ {
|
||||
for s[i] == 0 {
|
||||
_, err = io.ReadFull(rand, s[i:i+1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
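A minimal usage sketch for this package, assuming the caller already holds an *elgamal.PrivateKey (for example one extracted from a parsed OpenPGP key); the roundTrip helper and the session-key argument are illustrative, since the package does not generate group parameters:

```
package example

import (
	"crypto/rand"

	"golang.org/x/crypto/openpgp/elgamal"
)

// roundTrip encrypts a short session key to priv's public half and decrypts
// it again. priv is assumed to come from elsewhere; this package provides no
// parameter generation.
func roundTrip(priv *elgamal.PrivateKey, sessionKey []byte) ([]byte, error) {
	c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, sessionKey)
	if err != nil {
		return nil, err
	}
	return elgamal.Decrypt(priv, c1, c2)
}
```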
78
vendor/golang.org/x/crypto/openpgp/errors/errors.go
generated
vendored
Normal file
78
vendor/golang.org/x/crypto/openpgp/errors/errors.go
generated
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package errors contains common error types for the OpenPGP packages.
|
||||
//
|
||||
// Deprecated: this package is unmaintained except for security fixes. New
|
||||
// applications should consider a more focused, modern alternative to OpenPGP
|
||||
// for their specific task. If you are required to interoperate with OpenPGP
|
||||
// systems and need a maintained package, consider a community fork.
|
||||
// See https://golang.org/issue/44226.
|
||||
package errors // import "golang.org/x/crypto/openpgp/errors"
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// A StructuralError is returned when OpenPGP data is found to be syntactically
|
||||
// invalid.
|
||||
type StructuralError string
|
||||
|
||||
func (s StructuralError) Error() string {
|
||||
return "openpgp: invalid data: " + string(s)
|
||||
}
|
||||
|
||||
// UnsupportedError indicates that, although the OpenPGP data is valid, it
|
||||
// makes use of currently unimplemented features.
|
||||
type UnsupportedError string
|
||||
|
||||
func (s UnsupportedError) Error() string {
|
||||
return "openpgp: unsupported feature: " + string(s)
|
||||
}
|
||||
|
||||
// InvalidArgumentError indicates that the caller is in error and passed an
|
||||
// incorrect value.
|
||||
type InvalidArgumentError string
|
||||
|
||||
func (i InvalidArgumentError) Error() string {
|
||||
return "openpgp: invalid argument: " + string(i)
|
||||
}
|
||||
|
||||
// SignatureError indicates that a syntactically valid signature failed to
|
||||
// validate.
|
||||
type SignatureError string
|
||||
|
||||
func (b SignatureError) Error() string {
|
||||
return "openpgp: invalid signature: " + string(b)
|
||||
}
|
||||
|
||||
type keyIncorrectError int
|
||||
|
||||
func (ki keyIncorrectError) Error() string {
|
||||
return "openpgp: incorrect key"
|
||||
}
|
||||
|
||||
var ErrKeyIncorrect error = keyIncorrectError(0)
|
||||
|
||||
type unknownIssuerError int
|
||||
|
||||
func (unknownIssuerError) Error() string {
|
||||
return "openpgp: signature made by unknown entity"
|
||||
}
|
||||
|
||||
var ErrUnknownIssuer error = unknownIssuerError(0)
|
||||
|
||||
type keyRevokedError int
|
||||
|
||||
func (keyRevokedError) Error() string {
|
||||
return "openpgp: signature made by revoked key"
|
||||
}
|
||||
|
||||
var ErrKeyRevoked error = keyRevokedError(0)
|
||||
|
||||
type UnknownPacketTypeError uint8
|
||||
|
||||
func (upte UnknownPacketTypeError) Error() string {
|
||||
return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
|
||||
}
|
||||
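The concrete error types above are meant to be distinguished by type assertion or type switch. A small sketch, assuming err came from some openpgp call (the classify helper is illustrative):

```
package example

import (
	"fmt"

	pgperrors "golang.org/x/crypto/openpgp/errors"
)

// classify reports which openpgp error category err falls into.
func classify(err error) string {
	switch err.(type) {
	case pgperrors.StructuralError:
		return "malformed OpenPGP data"
	case pgperrors.UnsupportedError:
		return "valid but unsupported feature"
	case pgperrors.SignatureError:
		return "signature failed to validate"
	default:
		if err == pgperrors.ErrUnknownIssuer {
			return "signature made by unknown entity"
		}
		return fmt.Sprintf("other error: %v", err)
	}
}
```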
693
vendor/golang.org/x/crypto/openpgp/keys.go
generated
vendored
Normal file
693
vendor/golang.org/x/crypto/openpgp/keys.go
generated
vendored
Normal file
@@ -0,0 +1,693 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package openpgp
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/openpgp/armor"
|
||||
"golang.org/x/crypto/openpgp/errors"
|
||||
"golang.org/x/crypto/openpgp/packet"
|
||||
)
|
||||
|
||||
// PublicKeyType is the armor type for a PGP public key.
|
||||
var PublicKeyType = "PGP PUBLIC KEY BLOCK"
|
||||
|
||||
// PrivateKeyType is the armor type for a PGP private key.
|
||||
var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
|
||||
|
||||
// An Entity represents the components of an OpenPGP key: a primary public key
|
||||
// (which must be a signing key), one or more identities claimed by that key,
|
||||
// and zero or more subkeys, which may be encryption keys.
|
||||
type Entity struct {
|
||||
PrimaryKey *packet.PublicKey
|
||||
PrivateKey *packet.PrivateKey
|
||||
Identities map[string]*Identity // indexed by Identity.Name
|
||||
Revocations []*packet.Signature
|
||||
Subkeys []Subkey
|
||||
}
|
||||
|
||||
// An Identity represents an identity claimed by an Entity and zero or more
|
||||
// assertions by other entities about that claim.
|
||||
type Identity struct {
|
||||
Name string // by convention, has the form "Full Name (comment) <email@example.com>"
|
||||
UserId *packet.UserId
|
||||
SelfSignature *packet.Signature
|
||||
Signatures []*packet.Signature
|
||||
}
|
||||
|
||||
// A Subkey is an additional public key in an Entity. Subkeys can be used for
|
||||
// encryption.
|
||||
type Subkey struct {
|
||||
PublicKey *packet.PublicKey
|
||||
PrivateKey *packet.PrivateKey
|
||||
Sig *packet.Signature
|
||||
}
|
||||
|
||||
// A Key identifies a specific public key in an Entity. This is either the
|
||||
// Entity's primary key or a subkey.
|
||||
type Key struct {
|
||||
Entity *Entity
|
||||
PublicKey *packet.PublicKey
|
||||
PrivateKey *packet.PrivateKey
|
||||
SelfSignature *packet.Signature
|
||||
}
|
||||
|
||||
// A KeyRing provides access to public and private keys.
|
||||
type KeyRing interface {
|
||||
// KeysById returns the set of keys that have the given key id.
|
||||
KeysById(id uint64) []Key
|
||||
// KeysByIdUsage returns the set of keys with the given id
|
||||
// that also meet the key usage given by requiredUsage.
|
||||
// The requiredUsage is expressed as the bitwise-OR of
|
||||
// packet.KeyFlag* values.
|
||||
KeysByIdUsage(id uint64, requiredUsage byte) []Key
|
||||
// DecryptionKeys returns all private keys that are valid for
|
||||
// decryption.
|
||||
DecryptionKeys() []Key
|
||||
}
|
||||
|
||||
// primaryIdentity returns the Identity marked as primary or the first identity
|
||||
// if none are so marked.
|
||||
func (e *Entity) primaryIdentity() *Identity {
|
||||
var firstIdentity *Identity
|
||||
for _, ident := range e.Identities {
|
||||
if firstIdentity == nil {
|
||||
firstIdentity = ident
|
||||
}
|
||||
if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
|
||||
return ident
|
||||
}
|
||||
}
|
||||
return firstIdentity
|
||||
}
|
||||
|
||||
// encryptionKey returns the best candidate Key for encrypting a message to the
|
||||
// given Entity.
|
||||
func (e *Entity) encryptionKey(now time.Time) (Key, bool) {
|
||||
candidateSubkey := -1
|
||||
|
||||
// Iterate the keys to find the newest key
|
||||
var maxTime time.Time
|
||||
for i, subkey := range e.Subkeys {
|
||||
if subkey.Sig.FlagsValid &&
|
||||
subkey.Sig.FlagEncryptCommunications &&
|
||||
subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
|
||||
!subkey.Sig.KeyExpired(now) &&
|
||||
(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
|
||||
candidateSubkey = i
|
||||
maxTime = subkey.Sig.CreationTime
|
||||
}
|
||||
}
|
||||
|
||||
if candidateSubkey != -1 {
|
||||
subkey := e.Subkeys[candidateSubkey]
|
||||
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
|
||||
}
|
||||
|
||||
// If we don't have any candidate subkeys for encryption and
|
||||
// the primary key doesn't have any usage metadata then we
|
||||
// assume that the primary key is ok. Or, if the primary key is
|
||||
// marked as ok to encrypt to, then we can obviously use it.
|
||||
i := e.primaryIdentity()
|
||||
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&
|
||||
e.PrimaryKey.PubKeyAlgo.CanEncrypt() &&
|
||||
!i.SelfSignature.KeyExpired(now) {
|
||||
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
|
||||
}
|
||||
|
||||
// This Entity appears to be signing only.
|
||||
return Key{}, false
|
||||
}
|
||||
|
||||
// signingKey returns the best candidate Key for signing a message with this
|
||||
// Entity.
|
||||
func (e *Entity) signingKey(now time.Time) (Key, bool) {
|
||||
candidateSubkey := -1
|
||||
|
||||
for i, subkey := range e.Subkeys {
|
||||
if subkey.Sig.FlagsValid &&
|
||||
subkey.Sig.FlagSign &&
|
||||
subkey.PublicKey.PubKeyAlgo.CanSign() &&
|
||||
!subkey.Sig.KeyExpired(now) {
|
||||
candidateSubkey = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if candidateSubkey != -1 {
|
||||
subkey := e.Subkeys[candidateSubkey]
|
||||
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
|
||||
}
|
||||
|
||||
// If we have no candidate subkey then we assume that it's ok to sign
|
||||
// with the primary key.
|
||||
i := e.primaryIdentity()
|
||||
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign &&
|
||||
!i.SelfSignature.KeyExpired(now) {
|
||||
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
|
||||
}
|
||||
|
||||
return Key{}, false
|
||||
}
|
||||
|
||||
// An EntityList contains one or more Entities.
|
||||
type EntityList []*Entity
|
||||
|
||||
// KeysById returns the set of keys that have the given key id.
|
||||
func (el EntityList) KeysById(id uint64) (keys []Key) {
|
||||
for _, e := range el {
|
||||
if e.PrimaryKey.KeyId == id {
|
||||
var selfSig *packet.Signature
|
||||
for _, ident := range e.Identities {
|
||||
if selfSig == nil {
|
||||
selfSig = ident.SelfSignature
|
||||
} else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
|
||||
selfSig = ident.SelfSignature
|
||||
break
|
||||
}
|
||||
}
|
||||
keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig})
|
||||
}
|
||||
|
||||
for _, subKey := range e.Subkeys {
|
||||
if subKey.PublicKey.KeyId == id {
|
||||
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// KeysByIdUsage returns the set of keys with the given id that also meet
|
||||
// the key usage given by requiredUsage. The requiredUsage is expressed as
|
||||
// the bitwise-OR of packet.KeyFlag* values.
|
||||
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
|
||||
for _, key := range el.KeysById(id) {
|
||||
if len(key.Entity.Revocations) > 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if key.SelfSignature.RevocationReason != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if key.SelfSignature.FlagsValid && requiredUsage != 0 {
|
||||
var usage byte
|
||||
if key.SelfSignature.FlagCertify {
|
||||
usage |= packet.KeyFlagCertify
|
||||
}
|
||||
if key.SelfSignature.FlagSign {
|
||||
usage |= packet.KeyFlagSign
|
||||
}
|
||||
if key.SelfSignature.FlagEncryptCommunications {
|
||||
usage |= packet.KeyFlagEncryptCommunications
|
||||
}
|
||||
if key.SelfSignature.FlagEncryptStorage {
|
||||
usage |= packet.KeyFlagEncryptStorage
|
||||
}
|
||||
if usage&requiredUsage != requiredUsage {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
keys = append(keys, key)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DecryptionKeys returns all private keys that are valid for decryption.
|
||||
func (el EntityList) DecryptionKeys() (keys []Key) {
|
||||
for _, e := range el {
|
||||
for _, subKey := range e.Subkeys {
|
||||
if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
|
||||
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ReadArmoredKeyRing reads one or more public/private keys from an armored keyring file.
|
||||
func ReadArmoredKeyRing(r io.Reader) (EntityList, error) {
|
||||
block, err := armor.Decode(r)
|
||||
if err == io.EOF {
|
||||
return nil, errors.InvalidArgumentError("no armored data found")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if block.Type != PublicKeyType && block.Type != PrivateKeyType {
|
||||
return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type)
|
||||
}
|
||||
|
||||
return ReadKeyRing(block.Body)
|
||||
}
|
||||
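A short usage sketch for ReadArmoredKeyRing; the pubring.asc path is a placeholder:

```
package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	// "pubring.asc" stands in for any ASCII-armored keyring file.
	f, err := os.Open("pubring.asc")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	el, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		panic(err)
	}
	for _, e := range el {
		fmt.Printf("primary key %016X with %d subkey(s)\n", e.PrimaryKey.KeyId, len(e.Subkeys))
	}
}
```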
|
||||
// ReadKeyRing reads one or more public/private keys. Unsupported keys are
|
||||
// ignored as long as at least a single valid key is found.
|
||||
func ReadKeyRing(r io.Reader) (el EntityList, err error) {
|
||||
packets := packet.NewReader(r)
|
||||
var lastUnsupportedError error
|
||||
|
||||
for {
|
||||
var e *Entity
|
||||
e, err = ReadEntity(packets)
|
||||
if err != nil {
|
||||
// TODO: warn about skipped unsupported/unreadable keys
|
||||
if _, ok := err.(errors.UnsupportedError); ok {
|
||||
lastUnsupportedError = err
|
||||
err = readToNextPublicKey(packets)
|
||||
} else if _, ok := err.(errors.StructuralError); ok {
|
||||
// Skip unreadable, badly-formatted keys
|
||||
lastUnsupportedError = err
|
||||
err = readToNextPublicKey(packets)
|
||||
}
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
el = nil
|
||||
break
|
||||
}
|
||||
} else {
|
||||
el = append(el, e)
|
||||
}
|
||||
}
|
||||
|
||||
if len(el) == 0 && err == nil {
|
||||
err = lastUnsupportedError
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// readToNextPublicKey reads packets until the start of the entity and leaves
|
||||
// the first packet of the new entity in the Reader.
|
||||
func readToNextPublicKey(packets *packet.Reader) (err error) {
|
||||
var p packet.Packet
|
||||
for {
|
||||
p, err = packets.Next()
|
||||
if err == io.EOF {
|
||||
return
|
||||
} else if err != nil {
|
||||
if _, ok := err.(errors.UnsupportedError); ok {
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey {
|
||||
packets.Unread(p)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ReadEntity reads an entity (public key, identities, subkeys, etc.) from the
|
||||
// given Reader.
|
||||
func ReadEntity(packets *packet.Reader) (*Entity, error) {
|
||||
e := new(Entity)
|
||||
e.Identities = make(map[string]*Identity)
|
||||
|
||||
p, err := packets.Next()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ok bool
|
||||
if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
|
||||
if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
|
||||
packets.Unread(p)
|
||||
return nil, errors.StructuralError("first packet was not a public/private key")
|
||||
}
|
||||
e.PrimaryKey = &e.PrivateKey.PublicKey
|
||||
}
|
||||
|
||||
if !e.PrimaryKey.PubKeyAlgo.CanSign() {
|
||||
return nil, errors.StructuralError("primary key cannot be used for signatures")
|
||||
}
|
||||
|
||||
var revocations []*packet.Signature
|
||||
EachPacket:
|
||||
for {
|
||||
p, err := packets.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch pkt := p.(type) {
|
||||
case *packet.UserId:
|
||||
if err := addUserID(e, packets, pkt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *packet.Signature:
|
||||
if pkt.SigType == packet.SigTypeKeyRevocation {
|
||||
revocations = append(revocations, pkt)
|
||||
} else if pkt.SigType == packet.SigTypeDirectSignature {
|
||||
// TODO: RFC4880 5.2.1 permits signatures
|
||||
// directly on keys (eg. to bind additional
|
||||
// revocation keys).
|
||||
}
|
||||
// Otherwise, ignore the signature, as it does not follow anything
|
||||
// we would know to attach it to.
|
||||
case *packet.PrivateKey:
|
||||
if !pkt.IsSubkey {
|
||||
packets.Unread(p)
|
||||
break EachPacket
|
||||
}
|
||||
err = addSubkey(e, packets, &pkt.PublicKey, pkt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *packet.PublicKey:
|
||||
if !pkt.IsSubkey {
|
||||
packets.Unread(p)
|
||||
break EachPacket
|
||||
}
|
||||
err = addSubkey(e, packets, pkt, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
// we ignore unknown packets
|
||||
}
|
||||
}
|
||||
|
||||
if len(e.Identities) == 0 {
|
||||
return nil, errors.StructuralError("entity without any identities")
|
||||
}
|
||||
|
||||
for _, revocation := range revocations {
|
||||
err = e.PrimaryKey.VerifyRevocationSignature(revocation)
|
||||
if err == nil {
|
||||
e.Revocations = append(e.Revocations, revocation)
|
||||
} else {
|
||||
// TODO: RFC 4880 5.2.3.15 defines revocation keys.
|
||||
return nil, errors.StructuralError("revocation signature signed by alternate key")
|
||||
}
|
||||
}
|
||||
|
||||
return e, nil
|
||||
}
|
||||
|
||||
func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error {
|
||||
// Make a new Identity object that we might wind up throwing away.
|
||||
// We'll only add it if we get a valid self-signature over this
|
||||
// userID.
|
||||
identity := new(Identity)
|
||||
identity.Name = pkt.Id
|
||||
identity.UserId = pkt
|
||||
|
||||
for {
|
||||
p, err := packets.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sig, ok := p.(*packet.Signature)
|
||||
if !ok {
|
||||
packets.Unread(p)
|
||||
break
|
||||
}
|
||||
|
||||
if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
|
||||
if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
|
||||
return errors.StructuralError("user ID self-signature invalid: " + err.Error())
|
||||
}
|
||||
identity.SelfSignature = sig
|
||||
e.Identities[pkt.Id] = identity
|
||||
} else {
|
||||
identity.Signatures = append(identity.Signatures, sig)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
|
||||
var subKey Subkey
|
||||
subKey.PublicKey = pub
|
||||
subKey.PrivateKey = priv
|
||||
|
||||
for {
|
||||
p, err := packets.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return errors.StructuralError("subkey signature invalid: " + err.Error())
|
||||
}
|
||||
|
||||
sig, ok := p.(*packet.Signature)
|
||||
if !ok {
|
||||
packets.Unread(p)
|
||||
break
|
||||
}
|
||||
|
||||
if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation {
|
||||
return errors.StructuralError("subkey signature with wrong type")
|
||||
}
|
||||
|
||||
if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil {
|
||||
return errors.StructuralError("subkey signature invalid: " + err.Error())
|
||||
}
|
||||
|
||||
switch sig.SigType {
|
||||
case packet.SigTypeSubkeyRevocation:
|
||||
subKey.Sig = sig
|
||||
case packet.SigTypeSubkeyBinding:
|
||||
|
||||
if shouldReplaceSubkeySig(subKey.Sig, sig) {
|
||||
subKey.Sig = sig
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if subKey.Sig == nil {
|
||||
return errors.StructuralError("subkey packet not followed by signature")
|
||||
}
|
||||
|
||||
e.Subkeys = append(e.Subkeys, subKey)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool {
|
||||
if potentialNewSig == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if existingSig == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if existingSig.SigType == packet.SigTypeSubkeyRevocation {
|
||||
return false // never override a revocation signature
|
||||
}
|
||||
|
||||
return potentialNewSig.CreationTime.After(existingSig.CreationTime)
|
||||
}
|
||||
|
||||
const defaultRSAKeyBits = 2048
|
||||
|
||||
// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
|
||||
// single identity composed of the given full name, comment and email, any of
|
||||
// which may be empty but must not contain any of "()<>\x00".
|
||||
// If config is nil, sensible defaults will be used.
|
||||
func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
|
||||
creationTime := config.Now()
|
||||
|
||||
bits := defaultRSAKeyBits
|
||||
if config != nil && config.RSABits != 0 {
|
||||
bits = config.RSABits
|
||||
}
|
||||
|
||||
uid := packet.NewUserId(name, comment, email)
|
||||
if uid == nil {
|
||||
return nil, errors.InvalidArgumentError("user id field contained invalid characters")
|
||||
}
|
||||
signingPriv, err := rsa.GenerateKey(config.Random(), bits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
encryptingPriv, err := rsa.GenerateKey(config.Random(), bits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
e := &Entity{
|
||||
PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey),
|
||||
PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv),
|
||||
Identities: make(map[string]*Identity),
|
||||
}
|
||||
isPrimaryId := true
|
||||
e.Identities[uid.Id] = &Identity{
|
||||
Name: uid.Id,
|
||||
UserId: uid,
|
||||
SelfSignature: &packet.Signature{
|
||||
CreationTime: creationTime,
|
||||
SigType: packet.SigTypePositiveCert,
|
||||
PubKeyAlgo: packet.PubKeyAlgoRSA,
|
||||
Hash: config.Hash(),
|
||||
IsPrimaryId: &isPrimaryId,
|
||||
FlagsValid: true,
|
||||
FlagSign: true,
|
||||
FlagCertify: true,
|
||||
IssuerKeyId: &e.PrimaryKey.KeyId,
|
||||
},
|
||||
}
|
||||
err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the user passes in a DefaultHash via packet.Config,
|
||||
// set the PreferredHash for the SelfSignature.
|
||||
if config != nil && config.DefaultHash != 0 {
|
||||
e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
|
||||
}
|
||||
|
||||
// Likewise for DefaultCipher.
|
||||
if config != nil && config.DefaultCipher != 0 {
|
||||
e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)}
|
||||
}
|
||||
|
||||
e.Subkeys = make([]Subkey, 1)
|
||||
e.Subkeys[0] = Subkey{
|
||||
PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey),
|
||||
PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv),
|
||||
Sig: &packet.Signature{
|
||||
CreationTime: creationTime,
|
||||
SigType: packet.SigTypeSubkeyBinding,
|
||||
PubKeyAlgo: packet.PubKeyAlgoRSA,
|
||||
Hash: config.Hash(),
|
||||
FlagsValid: true,
|
||||
FlagEncryptStorage: true,
|
||||
FlagEncryptCommunications: true,
|
||||
IssuerKeyId: &e.PrimaryKey.KeyId,
|
||||
},
|
||||
}
|
||||
e.Subkeys[0].PublicKey.IsSubkey = true
|
||||
e.Subkeys[0].PrivateKey.IsSubkey = true
|
||||
err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return e, nil
|
||||
}
|
||||
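A minimal sketch of NewEntity together with SerializePrivate, writing an armored private key to stdout; the name, email, and nil configs are illustrative defaults:

```
package main

import (
	"os"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	// Generate a fresh RSA/RSA entity with the defaults (nil config => 2048-bit keys).
	e, err := openpgp.NewEntity("Alice Example", "", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}

	// Armor and write the private part to stdout.
	w, err := armor.Encode(os.Stdout, openpgp.PrivateKeyType, nil)
	if err != nil {
		panic(err)
	}
	if err := e.SerializePrivate(w, nil); err != nil {
		panic(err)
	}
	w.Close()
}
```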
|
||||
// SerializePrivate serializes an Entity, including private key material, but
|
||||
// excluding signatures from other entities, to the given Writer.
|
||||
// Identities and subkeys are re-signed in case they changed since NewEntity.
|
||||
// If config is nil, sensible defaults will be used.
|
||||
func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
|
||||
err = e.PrivateKey.Serialize(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, ident := range e.Identities {
|
||||
err = ident.UserId.Serialize(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = ident.SelfSignature.Serialize(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, subkey := range e.Subkeys {
|
||||
err = subkey.PrivateKey.Serialize(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = subkey.Sig.Serialize(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serialize writes the public part of the given Entity to w, including
|
||||
// signatures from other entities. No private key material will be output.
|
||||
func (e *Entity) Serialize(w io.Writer) error {
|
||||
err := e.PrimaryKey.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ident := range e.Identities {
|
||||
err = ident.UserId.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ident.SelfSignature.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, sig := range ident.Signatures {
|
||||
err = sig.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, subkey := range e.Subkeys {
|
||||
err = subkey.PublicKey.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = subkey.Sig.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignIdentity adds a signature to e, from signer, attesting that identity is
|
||||
// associated with e. The provided identity must already be an element of
|
||||
// e.Identities and the private key of signer must have been decrypted if
|
||||
// necessary.
|
||||
// If config is nil, sensible defaults will be used.
|
||||
func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
|
||||
if signer.PrivateKey == nil {
|
||||
return errors.InvalidArgumentError("signing Entity must have a private key")
|
||||
}
|
||||
if signer.PrivateKey.Encrypted {
|
||||
return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
|
||||
}
|
||||
ident, ok := e.Identities[identity]
|
||||
if !ok {
|
||||
return errors.InvalidArgumentError("given identity string not found in Entity")
|
||||
}
|
||||
|
||||
sig := &packet.Signature{
|
||||
SigType: packet.SigTypeGenericCert,
|
||||
PubKeyAlgo: signer.PrivateKey.PubKeyAlgo,
|
||||
Hash: config.Hash(),
|
||||
CreationTime: config.Now(),
|
||||
IssuerKeyId: &signer.PrivateKey.KeyId,
|
||||
}
|
||||
if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil {
|
||||
return err
|
||||
}
|
||||
ident.Signatures = append(ident.Signatures, sig)
|
||||
return nil
|
||||
}
|
||||
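A small sketch of SignIdentity, assuming both entities already exist and signer's private key is present and decrypted; the certify helper is illustrative:

```
package example

import (
	"errors"

	"golang.org/x/crypto/openpgp"
)

// certify has signer add a certification signature to target's first identity.
// Both entities are assumed to exist already; signer.PrivateKey must be
// present and decrypted, as SignIdentity requires.
func certify(target, signer *openpgp.Entity) error {
	for name := range target.Identities {
		return target.SignIdentity(name, signer, nil)
	}
	return errors.New("target has no identities")
}
```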
123
vendor/golang.org/x/crypto/openpgp/packet/compressed.go
generated
vendored
Normal file
123
vendor/golang.org/x/crypto/openpgp/packet/compressed.go
generated
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package packet
|
||||
|
||||
import (
|
||||
"compress/bzip2"
|
||||
"compress/flate"
|
||||
"compress/zlib"
|
||||
"golang.org/x/crypto/openpgp/errors"
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Compressed represents a compressed OpenPGP packet. The decompressed contents
|
||||
// will contain more OpenPGP packets. See RFC 4880, section 5.6.
|
||||
type Compressed struct {
|
||||
Body io.Reader
|
||||
}
|
||||
|
||||
const (
|
||||
NoCompression = flate.NoCompression
|
||||
BestSpeed = flate.BestSpeed
|
||||
BestCompression = flate.BestCompression
|
||||
DefaultCompression = flate.DefaultCompression
|
||||
)
|
||||
|
||||
// CompressionConfig contains compressor configuration settings.
|
||||
type CompressionConfig struct {
|
||||
// Level is the compression level to use. It must be
|
||||
// between -1 and 9, with -1 causing the compressor to use the
|
||||
// default compression level, 0 causing the compressor to use
|
||||
// no compression and 1 to 9 representing increasing (better,
|
||||
// slower) compression levels. If Level is less than -1 or
|
||||
// more than 9, a non-nil error will be returned during
|
||||
// encryption. See the constants above for convenient common
|
||||
// settings for Level.
|
||||
Level int
|
||||
}
|
||||
|
||||
func (c *Compressed) parse(r io.Reader) error {
|
||||
var buf [1]byte
|
||||
_, err := readFull(r, buf[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch buf[0] {
|
||||
case 1:
|
||||
c.Body = flate.NewReader(r)
|
||||
case 2:
|
||||
c.Body, err = zlib.NewReader(r)
|
||||
case 3:
|
||||
c.Body = bzip2.NewReader(r)
|
||||
default:
|
||||
err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// compressedWriteCloser represents the serialized compression stream
|
||||
// header and the compressor. Its Close() method ensures that both the
|
||||
// compressor and serialized stream header are closed. Its Write()
|
||||
// method writes to the compressor.
|
||||
type compressedWriteCloser struct {
|
||||
sh io.Closer // Stream Header
|
||||
c io.WriteCloser // Compressor
|
||||
}
|
||||
|
||||
func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
|
||||
return cwc.c.Write(p)
|
||||
}
|
||||
|
||||
func (cwc compressedWriteCloser) Close() (err error) {
|
||||
err = cwc.c.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cwc.sh.Close()
|
||||
}
|
||||
|
||||
// SerializeCompressed serializes a compressed data packet to w and
|
||||
// returns a WriteCloser to which the literal data packets themselves
|
||||
// can be written and which MUST be closed on completion. If cc is
|
||||
// nil, sensible defaults will be used to configure the compression
|
||||
// algorithm.
|
||||
func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
|
||||
compressed, err := serializeStreamHeader(w, packetTypeCompressed)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = compressed.Write([]byte{uint8(algo)})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
level := DefaultCompression
|
||||
if cc != nil {
|
||||
level = cc.Level
|
||||
}
|
||||
|
||||
var compressor io.WriteCloser
|
||||
switch algo {
|
||||
case CompressionZIP:
|
||||
compressor, err = flate.NewWriter(compressed, level)
|
||||
case CompressionZLIB:
|
||||
compressor, err = zlib.NewWriterLevel(compressed, level)
|
||||
default:
|
||||
s := strconv.Itoa(int(algo))
|
||||
err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
literaldata = compressedWriteCloser{compressed, compressor}
|
||||
|
||||
return
|
||||
}
|
||||
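A usage sketch for SerializeCompressed; the nopCloser adapter and the placeholder payload are illustrative, and in real use the inner writes would be serialized OpenPGP packets:

```
package main

import (
	"bytes"
	"io"

	"golang.org/x/crypto/openpgp/packet"
)

// nopCloser adapts a plain io.Writer to the io.WriteCloser that
// SerializeCompressed expects.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	// Wrap the inner packets in a ZLIB-compressed packet at the default level.
	literal, err := packet.SerializeCompressed(nopCloser{&buf},
		packet.CompressionZLIB, &packet.CompressionConfig{Level: packet.DefaultCompression})
	if err != nil {
		panic(err)
	}
	io.WriteString(literal, "inner OpenPGP packets would be written here")
	literal.Close() // MUST be closed on completion, as documented above
}
```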
91
vendor/golang.org/x/crypto/openpgp/packet/config.go
generated
vendored
Normal file
91
vendor/golang.org/x/crypto/openpgp/packet/config.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package packet
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Config collects a number of parameters along with sensible defaults.
|
||||
// A nil *Config is valid and results in all default values.
|
||||
type Config struct {
|
||||
// Rand provides the source of entropy.
|
||||
// If nil, the crypto/rand Reader is used.
|
||||
Rand io.Reader
|
||||
// DefaultHash is the default hash function to be used.
|
||||
// If zero, SHA-256 is used.
|
||||
DefaultHash crypto.Hash
|
||||
// DefaultCipher is the cipher to be used.
|
||||
// If zero, AES-128 is used.
|
||||
DefaultCipher CipherFunction
|
||||
// Time returns the current time.
|
||||
// If Time is nil, time.Now is used.
|
||||
Time func() time.Time
|
||||
// DefaultCompressionAlgo is the compression algorithm to be
|
||||
// applied to the plaintext before encryption. If zero, no
|
||||
// compression is done.
|
||||
DefaultCompressionAlgo CompressionAlgo
|
||||
// CompressionConfig configures the compression settings.
|
||||
CompressionConfig *CompressionConfig
|
||||
// S2KCount is only used for symmetric encryption. It
|
||||
// determines the strength of the passphrase stretching when
|
||||
// the said passphrase is hashed to produce a key. S2KCount
|
||||
// should be between 1024 and 65011712, inclusive. If Config
|
||||
// is nil or S2KCount is 0, the value 65536 is used. Not all
|
||||
// values in the above range can be represented. S2KCount will
|
||||
// be rounded up to the next representable value if it cannot
|
||||
// be encoded exactly. When set, it is strongly encouraged to
|
||||
// use a value that is at least 65536. See RFC 4880 Section
|
||||
// 3.7.1.3.
|
||||
S2KCount int
|
||||
// RSABits is the number of bits in new RSA keys made with NewEntity.
|
||||
// If zero, then 2048 bit keys are created.
|
||||
RSABits int
|
||||
}
|
||||
|
||||
func (c *Config) Random() io.Reader {
|
||||
if c == nil || c.Rand == nil {
|
||||
return rand.Reader
|
||||
}
|
||||
return c.Rand
|
||||
}
|
||||
|
||||
func (c *Config) Hash() crypto.Hash {
|
||||
if c == nil || uint(c.DefaultHash) == 0 {
|
||||
return crypto.SHA256
|
||||
}
|
||||
return c.DefaultHash
|
||||
}
|
||||
|
||||
func (c *Config) Cipher() CipherFunction {
|
||||
if c == nil || uint8(c.DefaultCipher) == 0 {
|
||||
return CipherAES128
|
||||
}
|
||||
return c.DefaultCipher
|
||||
}
|
||||
|
||||
func (c *Config) Now() time.Time {
|
||||
if c == nil || c.Time == nil {
|
||||
return time.Now()
|
||||
}
|
||||
return c.Time()
|
||||
}
|
||||
|
||||
func (c *Config) Compression() CompressionAlgo {
|
||||
if c == nil {
|
||||
return CompressionNone
|
||||
}
|
||||
return c.DefaultCompressionAlgo
|
||||
}
|
||||
|
||||
func (c *Config) PasswordHashIterations() int {
|
||||
if c == nil || c.S2KCount == 0 {
|
||||
return 0
|
||||
}
|
||||
return c.S2KCount
|
||||
}
|
||||
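A short sketch of how a *Config overrides the defaults, and how a nil *Config still yields sensible values; the chosen hash, cipher, and key size are illustrative:

```
package main

import (
	"crypto"
	_ "crypto/sha512" // registers SHA-512 so the hash is available when actually used

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// A nil *Config is valid; these overrides are purely illustrative.
	cfg := &packet.Config{
		DefaultHash:   crypto.SHA512,
		DefaultCipher: packet.CipherAES256,
		RSABits:       4096,
	}
	_ = cfg.Hash()   // crypto.SHA512
	_ = cfg.Cipher() // packet.CipherAES256

	var nilCfg *packet.Config
	_ = nilCfg.Hash() // crypto.SHA256: the defaults still work on a nil receiver
}
```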
208
vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
generated
vendored
Normal file
208
vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
generated
vendored
Normal file
@@ -0,0 +1,208 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package packet
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rsa"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/crypto/openpgp/elgamal"
|
||||
"golang.org/x/crypto/openpgp/errors"
|
||||
)
|
||||
|
||||
const encryptedKeyVersion = 3
|
||||
|
||||
// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
|
||||
// section 5.1.
|
||||
type EncryptedKey struct {
|
||||
KeyId uint64
|
||||
Algo PublicKeyAlgorithm
|
||||
CipherFunc CipherFunction // only valid after a successful Decrypt
|
||||
Key []byte // only valid after a successful Decrypt
|
||||
|
||||
encryptedMPI1, encryptedMPI2 parsedMPI
|
||||
}
|
||||
|
||||
func (e *EncryptedKey) parse(r io.Reader) (err error) {
|
||||
var buf [10]byte
|
||||
_, err = readFull(r, buf[:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if buf[0] != encryptedKeyVersion {
|
||||
return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
|
||||
}
|
||||
e.KeyId = binary.BigEndian.Uint64(buf[1:9])
|
||||
e.Algo = PublicKeyAlgorithm(buf[9])
|
||||
switch e.Algo {
|
||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
|
||||
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
case PubKeyAlgoElGamal:
|
||||
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
_, err = consumeAll(r)
|
||||
return
|
||||
}
|
||||
|
||||
func checksumKeyMaterial(key []byte) uint16 {
|
||||
var checksum uint16
|
||||
for _, v := range key {
|
||||
checksum += uint16(v)
|
||||
}
|
||||
return checksum
|
||||
}
|
||||
|
||||
// Decrypt decrypts an encrypted session key with the given private key. The
|
||||
// private key must have been decrypted first.
|
||||
// If config is nil, sensible defaults will be used.
|
||||
func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
|
||||
var err error
|
||||
var b []byte
|
||||
|
||||
// TODO(agl): use session key decryption routines here to avoid
|
||||
// padding oracle attacks.
|
||||
switch priv.PubKeyAlgo {
|
||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
|
||||
// Supports both *rsa.PrivateKey and crypto.Decrypter
|
||||
k := priv.PrivateKey.(crypto.Decrypter)
|
||||
b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.bytes), nil)
|
||||
case PubKeyAlgoElGamal:
|
||||
c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
|
||||
c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
|
||||
b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
|
||||
default:
|
||||
err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
e.CipherFunc = CipherFunction(b[0])
|
||||
e.Key = b[1 : len(b)-2]
|
||||
expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
|
||||
checksum := checksumKeyMaterial(e.Key)
|
||||
if checksum != expectedChecksum {
|
||||
return errors.StructuralError("EncryptedKey checksum incorrect")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serialize writes the encrypted key packet, e, to w.
|
||||
func (e *EncryptedKey) Serialize(w io.Writer) error {
|
||||
var mpiLen int
|
||||
switch e.Algo {
|
||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
|
||||
mpiLen = 2 + len(e.encryptedMPI1.bytes)
|
||||
case PubKeyAlgoElGamal:
|
||||
mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes)
|
||||
default:
|
||||
return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
|
||||
}
|
||||
|
||||
serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen)
|
||||
|
||||
w.Write([]byte{encryptedKeyVersion})
|
||||
binary.Write(w, binary.BigEndian, e.KeyId)
|
||||
w.Write([]byte{byte(e.Algo)})
|
||||
|
||||
switch e.Algo {
|
||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
|
||||
writeMPIs(w, e.encryptedMPI1)
|
||||
case PubKeyAlgoElGamal:
|
||||
writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2)
|
||||
default:
|
||||
panic("internal error")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SerializeEncryptedKey serializes an encrypted key packet to w that contains
|
||||
// key, encrypted to pub.
|
||||
// If config is nil, sensible defaults will be used.
|
||||
func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error {
|
||||
var buf [10]byte
|
||||
buf[0] = encryptedKeyVersion
|
||||
binary.BigEndian.PutUint64(buf[1:9], pub.KeyId)
|
||||
buf[9] = byte(pub.PubKeyAlgo)
|
||||
|
||||
keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */)
|
||||
keyBlock[0] = byte(cipherFunc)
|
||||
copy(keyBlock[1:], key)
|
||||
checksum := checksumKeyMaterial(key)
|
||||
keyBlock[1+len(key)] = byte(checksum >> 8)
|
||||
keyBlock[1+len(key)+1] = byte(checksum)
|
||||
|
||||
switch pub.PubKeyAlgo {
|
||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
|
||||
return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock)
|
||||
case PubKeyAlgoElGamal:
|
||||
return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock)
|
||||
case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
|
||||
return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
|
||||
}
|
||||
|
||||
return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
|
||||
}
|
||||
|
||||
func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
|
||||
cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
|
||||
if err != nil {
|
||||
return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
|
||||
}
|
||||
|
||||
packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText)
|
||||
|
||||
err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(header[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writeMPI(w, 8*uint16(len(cipherText)), cipherText)
|
||||
}
|
||||
|
||||
func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
|
||||
c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
|
||||
if err != nil {
|
||||
return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
|
||||
}
|
||||
|
||||
packetLen := 10 /* header length */
|
||||
packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
|
||||
packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
|
||||
|
||||
err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(header[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = writeBig(w, c1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writeBig(w, c2)
|
||||
}
|
||||
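A sketch of SerializeEncryptedKey, assuming pub is an encryption-capable public key obtained elsewhere (for example an entity's encryption subkey); the 16-byte AES-128 session key is illustrative:

```
package example

import (
	"bytes"
	"crypto/rand"

	"golang.org/x/crypto/openpgp/packet"
)

// encryptSessionKey writes a public-key encrypted session key packet for pub.
// pub is assumed to come from elsewhere, e.g. a parsed keyring.
func encryptSessionKey(pub *packet.PublicKey) ([]byte, error) {
	sessionKey := make([]byte, 16) // AES-128 session key
	if _, err := rand.Read(sessionKey); err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	if err := packet.SerializeEncryptedKey(&buf, pub, packet.CipherAES128, sessionKey, nil); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```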
89
vendor/golang.org/x/crypto/openpgp/packet/literal.go
generated
vendored
Normal file
89
vendor/golang.org/x/crypto/openpgp/packet/literal.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package packet
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
)
|
||||
|
||||
// LiteralData represents the contents of a literal data packet. See RFC 4880, section 5.9.
|
||||
type LiteralData struct {
|
||||
IsBinary bool
|
||||
FileName string
|
||||
Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
|
||||
Body io.Reader
|
||||
}
|
||||
|
||||
// ForEyesOnly returns whether the contents of the LiteralData have been marked
|
||||
// as especially sensitive.
|
||||
func (l *LiteralData) ForEyesOnly() bool {
|
||||
return l.FileName == "_CONSOLE"
|
||||
}
|
||||
|
||||
func (l *LiteralData) parse(r io.Reader) (err error) {
|
||||
var buf [256]byte
|
||||
|
||||
_, err = readFull(r, buf[:2])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
l.IsBinary = buf[0] == 'b'
|
||||
fileNameLen := int(buf[1])
|
||||
|
||||
_, err = readFull(r, buf[:fileNameLen])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
l.FileName = string(buf[:fileNameLen])
|
||||
|
||||
_, err = readFull(r, buf[:4])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
l.Time = binary.BigEndian.Uint32(buf[:4])
|
||||
l.Body = r
|
||||
return
|
||||
}
|
||||
|
||||
// SerializeLiteral serializes a literal data packet to w and returns a
|
||||
// WriteCloser to which the data itself can be written and which MUST be closed
|
||||
// on completion. The fileName is truncated to 255 bytes.
|
||||
func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
|
||||
var buf [4]byte
|
||||
buf[0] = 't'
|
||||
if isBinary {
|
||||
buf[0] = 'b'
|
||||
}
|
||||
if len(fileName) > 255 {
|
||||
fileName = fileName[:255]
|
||||
}
|
||||
buf[1] = byte(len(fileName))
|
||||
|
||||
inner, err := serializeStreamHeader(w, packetTypeLiteralData)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = inner.Write(buf[:2])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = inner.Write([]byte(fileName))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
binary.BigEndian.PutUint32(buf[:], time)
|
||||
_, err = inner.Write(buf[:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
plaintext = inner
|
||||
return
|
||||
}
|
||||
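A usage sketch for SerializeLiteral; the nopCloser adapter, file name, and payload are illustrative:

```
package main

import (
	"bytes"
	"io"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

// nopCloser adapts a plain io.Writer to the io.WriteCloser parameter.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	w, err := packet.SerializeLiteral(nopCloser{&buf}, true, "hello.txt", uint32(time.Now().Unix()))
	if err != nil {
		panic(err)
	}
	io.WriteString(w, "hello, literal data packet")
	w.Close() // MUST be closed on completion, as documented above
}
```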
143
vendor/golang.org/x/crypto/openpgp/packet/ocfb.go
generated
vendored
Normal file
143
vendor/golang.org/x/crypto/openpgp/packet/ocfb.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
|
||||
|
||||
package packet
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
)
|
||||
|
||||
type ocfbEncrypter struct {
|
||||
b cipher.Block
|
||||
fre []byte
|
||||
outUsed int
|
||||
}
|
||||
|
||||
// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
|
||||
// performed.
|
||||
type OCFBResyncOption bool
|
||||
|
||||
const (
|
||||
OCFBResync OCFBResyncOption = true
|
||||
OCFBNoResync OCFBResyncOption = false
|
||||
)
|
||||
|
||||
// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
|
||||
// cipher feedback mode using the given cipher.Block, and an initial amount of
|
||||
// ciphertext. randData must be random bytes and be the same length as the
|
||||
// cipher.Block's block size. Resync determines if the "resynchronization step"
|
||||
// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
|
||||
// this point.
|
||||
func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) {
|
||||
blockSize := block.BlockSize()
|
||||
if len(randData) != blockSize {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
x := &ocfbEncrypter{
|
||||
b: block,
|
||||
fre: make([]byte, blockSize),
|
||||
outUsed: 0,
|
||||
}
|
||||
prefix := make([]byte, blockSize+2)
|
||||
|
||||
block.Encrypt(x.fre, x.fre)
|
||||
for i := 0; i < blockSize; i++ {
|
||||
prefix[i] = randData[i] ^ x.fre[i]
|
||||
}
|
||||
|
||||
block.Encrypt(x.fre, prefix[:blockSize])
|
||||
prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
|
||||
prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]
|
||||
|
||||
if resync {
|
||||
block.Encrypt(x.fre, prefix[2:])
|
||||
} else {
|
||||
x.fre[0] = prefix[blockSize]
|
||||
x.fre[1] = prefix[blockSize+1]
|
||||
x.outUsed = 2
|
||||
}
|
||||
return x, prefix
|
||||
}
|
||||
|
||||
func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
|
||||
for i := 0; i < len(src); i++ {
|
||||
if x.outUsed == len(x.fre) {
|
||||
x.b.Encrypt(x.fre, x.fre)
|
||||
x.outUsed = 0
|
||||
}
|
||||
|
||||
x.fre[x.outUsed] ^= src[i]
|
||||
dst[i] = x.fre[x.outUsed]
|
||||
x.outUsed++
|
||||
}
|
||||
}
|
||||
|
||||
type ocfbDecrypter struct {
|
||||
b cipher.Block
|
||||
fre []byte
|
||||
outUsed int
|
||||
}
|
||||
|
||||
// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's
|
||||
// cipher feedback mode using the given cipher.Block. Prefix must be the first
|
||||
// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's
|
||||
// block size. If an incorrect key is detected then nil is returned. On
|
||||
// successful exit, blockSize+2 bytes of decrypted data are written into
|
||||
// prefix. Resync determines if the "resynchronization step" from RFC 4880,
|
||||
// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point.
|
||||
func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream {
|
||||
blockSize := block.BlockSize()
|
||||
if len(prefix) != blockSize+2 {
|
||||
return nil
|
||||
}
|
||||
|
||||
x := &ocfbDecrypter{
|
||||
b: block,
|
||||
fre: make([]byte, blockSize),
|
||||
outUsed: 0,
|
||||
}
|
||||
prefixCopy := make([]byte, len(prefix))
|
||||
copy(prefixCopy, prefix)
|
||||
|
||||
block.Encrypt(x.fre, x.fre)
|
||||
for i := 0; i < blockSize; i++ {
|
||||
prefixCopy[i] ^= x.fre[i]
|
||||
}
|
||||
|
||||
block.Encrypt(x.fre, prefix[:blockSize])
|
||||
prefixCopy[blockSize] ^= x.fre[0]
|
||||
prefixCopy[blockSize+1] ^= x.fre[1]
|
||||
|
||||
if prefixCopy[blockSize-2] != prefixCopy[blockSize] ||
|
||||
prefixCopy[blockSize-1] != prefixCopy[blockSize+1] {
|
||||
return nil
|
||||
}
|
||||
|
||||
if resync {
|
||||
block.Encrypt(x.fre, prefix[2:])
|
||||
} else {
|
||||
x.fre[0] = prefix[blockSize]
|
||||
x.fre[1] = prefix[blockSize+1]
|
||||
x.outUsed = 2
|
||||
}
|
||||
copy(prefix, prefixCopy)
|
||||
return x
|
||||
}
|
||||
|
||||
func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
|
||||
for i := 0; i < len(src); i++ {
|
||||
if x.outUsed == len(x.fre) {
|
||||
x.b.Encrypt(x.fre, x.fre)
|
||||
x.outUsed = 0
|
||||
}
|
||||
|
||||
c := src[i]
|
||||
dst[i] = x.fre[x.outUsed] ^ src[i]
|
||||
x.fre[x.outUsed] = c
|
||||
x.outUsed++
|
||||
}
|
||||
}
|
||||
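A round-trip sketch of the OCFB stream with AES-128, showing that the returned prefix must travel ahead of the body and that the decrypter performs a quick key check; all values are illustrative:

```
package main

import (
	"bytes"
	"crypto/aes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	key := make([]byte, 16)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}

	// Encrypt: the returned prefix is the encrypted random block plus the two
	// check bytes and must be transmitted ahead of the body.
	randData := make([]byte, block.BlockSize())
	if _, err := rand.Read(randData); err != nil {
		panic(err)
	}
	enc, prefix := packet.NewOCFBEncrypter(block, randData, packet.OCFBResync)
	plaintext := []byte("attack at dawn")
	body := make([]byte, len(plaintext))
	enc.XORKeyStream(body, plaintext)
	ciphertext := append(prefix, body...)

	// Decrypt: the same resync option must be used; nil means the quick key
	// check on the prefix failed.
	dec := packet.NewOCFBDecrypter(block, ciphertext[:block.BlockSize()+2], packet.OCFBResync)
	if dec == nil {
		panic("wrong key")
	}
	out := make([]byte, len(body))
	dec.XORKeyStream(out, ciphertext[block.BlockSize()+2:])
	fmt.Println(bytes.Equal(out, plaintext)) // true
}
```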
Some files were not shown because too many files have changed in this diff.