vendor and go mod. add ldap file
This commit is contained in:
parent
29efc7be3f
commit
55ae63dc1d
8
go.mod
8
go.mod
|
@ -1 +1,7 @@
|
|||
module github.com/pyke369/golang-support/uconfig
|
||||
module gitlab.dm.gg/vwf/openvpn-dm-mgt-server
|
||||
|
||||
require (
|
||||
github.com/pyke369/golang-support v0.0.0-20190703174728-34ca97aa79e9
|
||||
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
|
||||
gopkg.in/ldap.v2 v2.5.1
|
||||
)
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
github.com/pyke369/golang-support v0.0.0-20190703174728-34ca97aa79e9 h1:H1vjQ+Mfc8dFAOTuF541/tScdKoynzll9iKuWgaLLxM=
|
||||
github.com/pyke369/golang-support v0.0.0-20190703174728-34ca97aa79e9/go.mod h1:0XGrzgrEp0fa/+JSV8XZePUwyjnU6C3bMc7Xz2bHHKI=
|
||||
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
|
||||
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
|
||||
gopkg.in/ldap.v2 v2.5.1 h1:wiu0okdNfjlBzg6UWvd1Hn8Y+Ux17/u/4nlk4CQr6tU=
|
||||
gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk=
|
|
@ -0,0 +1,172 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gopkg.in/ldap.v2"
|
||||
)
|
||||
|
||||
// ldapConfig holds the settings for one LDAP authentication backend:
// which servers to contact, how to bind and search, and which group
// memberships / IP range apply to authenticated users.
type ldapConfig struct {
	servers            []string // LDAP server hostnames (contacted over ldaps, port 636)
	baseDN             string   // base DN used for user searches
	bindCn             string   // DN of the read-only bind account
	bindPw             string   // password of the read-only bind account
	searchFilter       string   // fmt template for the user search filter (login substituted in)
	primaryAttribute   string   // attribute checked against validGroups
	secondaryAttribute string   // attribute forwarded when a validGroups check is configured
	validGroups        []string // group names allowed to authenticate (empty means no filtering)
	otpType            string   // one-time-password scheme identifier
	certAuth           string   // certificate-based authentication setting
	ipMin              net.IP   // lower bound of the client address range
	ipMax              net.IP   // upper bound of the client address range
	upgradeFrom        string   // legacy configuration marker
}

// addIPRange parses a "min-max" IP range specification and stores the two
// bounds in l.ipMin / l.ipMax. Unlike the previous version, an unparseable
// bound is now reported as an error instead of being silently skipped, so a
// typo can no longer leave the range half-configured while returning nil.
func (l *ldapConfig) addIPRange(s string) error {
	ips := strings.Split(s, "-")
	if len(ips) != 2 {
		return errors.New("invalid IPs")
	}
	min := net.ParseIP(ips[0])
	if min == nil {
		return fmt.Errorf("invalid lower bound %q", ips[0])
	}
	max := net.ParseIP(ips[1])
	if max == nil {
		return fmt.Errorf("invalid upper bound %q", ips[1])
	}
	l.ipMin, l.ipMax = min, max
	return nil
}
|
||||
|
||||
// myDialTLS replaces the library's DialTLS so the initial TCP connect is
// bounded by a 3-second timeout (the stock version blocks with the OS
// default). On success it returns an ldap.Conn with its read loop started.
func myDialTLS(network, addr string, config *tls.Config) (*ldap.Conn, error) {
	// bound the TCP connect explicitly; errors are wrapped as ldap network errors
	dc, err := net.DialTimeout(network, addr, 3*time.Second)
	if err != nil {
		return nil, ldap.NewError(ldap.ErrorNetwork, err)
	}
	c := tls.Client(dc, config)
	if err = c.Handshake(); err != nil {
		// Handshake error, close the established connection before we return an error
		dc.Close()
		return nil, ldap.NewError(ldap.ErrorNetwork, err)
	}
	conn := ldap.NewConn(c, true) // second argument flags the connection as TLS
	conn.Start()                  // launch the connection's message-processing goroutine
	return conn, nil
}
|
||||
|
||||
// Auth authenticates logins/pass against the configured LDAP servers.
// It returns (error, userOk, passOk, attributes) where attributes are the
// values forwarded to the next configuration level. Servers are tried in
// order; the first reachable one decides the outcome. With an empty pass,
// only user existence and group membership are checked (passOk stays false).
func (conf *ldapConfig) Auth(logins []string, pass string) (e error, userOk, passOk bool, attributes []string) {
	var primary, secondary []string

	// special case: with no servers configured this entry acts as a pure
	// group filter on the attributes produced by the previous level
	if len(conf.servers) == 0 && len(conf.validGroups) > 0 {
		if inArray(logins, conf.validGroups) {
			return nil, true, false, logins
		}
	}

	// past the filter case, exactly one login is expected
	if len(logins) != 1 {
		return errors.New("invalid login"), false, false, nil
	}
	attributes = logins

	for _, s := range conf.servers {
		// we force ldaps because we can
		l, err := myDialTLS("tcp", s+":636", &tls.Config{ServerName: s})
		if err != nil {
			log.Println(err)
			continue
		}
		// NOTE(review): defer inside a loop — connections stay open until Auth
		// returns; bounded here because every later path returns, but worth confirming.
		defer l.Close()

		// First bind with a read only user
		if err = l.Bind(conf.bindCn, conf.bindPw); err != nil {
			log.Println(err)
			return err, false, false, nil
		}

		// request the DN plus the configured attribute(s)
		search := []string{"dn", conf.primaryAttribute}
		if conf.secondaryAttribute != "" {
			search = append(search, conf.secondaryAttribute)
		}

		// search the user under baseDN, substituting the login into the filter
		searchRequest := ldap.NewSearchRequest(
			conf.baseDN,
			ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
			fmt.Sprintf(conf.searchFilter, logins[0]),
			search,
			nil,
		)

		sr, err := l.Search(searchRequest)
		if err != nil {
			log.Println(err)
			return err, false, false, nil
		}
		// the search must match exactly one entry
		if len(sr.Entries) != 1 {
			log.Println("User does not exist or too many entries returned")
			return nil, false, false, nil
		}

		// collect the attributes requested in the search;
		// a valid account must be part of the correct group (per instance)
		for _, attribute := range sr.Entries[0].Attributes {
			if (*attribute).Name == conf.primaryAttribute {
				primary = attribute.Values
			}
			if (*attribute).Name == conf.secondaryAttribute {
				secondary = attribute.Values
			}
		}

		// user must have both primary and secondary attributes
		if len(primary) == 0 {
			log.Printf("User has no %s attribute", conf.primaryAttribute)
			return nil, false, false, nil
		}

		if len(secondary) == 0 {
			log.Printf("User has no %s attribute", conf.secondaryAttribute)
			return nil, false, false, nil
		}

		// check if the primary attributes are in the validGroups list
		// NOTE(review): argument order differs from the filter case above
		// (search=validGroups here vs search=logins there) — confirm intended.
		if len(conf.validGroups) > 0 && !inArray(conf.validGroups, primary) {
			return nil, false, false, nil
		}

		// if there is no validGroups check, pass the primary attributes to the
		// next level; otherwise forward the secondary attributes
		if len(conf.validGroups) == 0 {
			attributes = primary
		} else {
			attributes = secondary
		}

		log.Printf("User has a valid account on %s", s)

		userdn := sr.Entries[0].DN

		// if the password is empty, stop here (existence/group check only)
		if pass == "" {
			return nil, true, false, attributes
		}

		// re-bind as the user: a bind error here means an invalid password
		if err = l.Bind(userdn, pass); err != nil {
			return nil, true, false, attributes
		}

		// everything is fine
		log.Printf("User has a valid password on %s", s)
		return nil, true, true, attributes
	}
	// if we are here, no server is responding: reject the authentication
	log.Println("can't join any ldap server")
	return
}
|
|
@ -0,0 +1,34 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/pyke369/golang-support/uconfig"
|
||||
)
|
||||
|
||||
// inArray reports whether any member of search is present in list.
// list is sorted in place (a side effect visible to the caller) so each
// member of search can then be located with a binary search.
func inArray(search, list []string) bool {
	sort.Strings(list)
	for _, g := range search {
		// sort.SearchStrings is the stdlib form of the previous hand-rolled
		// sort.Search closure; the unreachable break after return is gone
		// (it was flagged by go vet).
		i := sort.SearchStrings(list, g)
		if i < len(list) && list[i] == g {
			return true
		}
	}
	return false
}
|
||||
|
||||
// parse a uconf array
|
||||
func parseConfigArray(config *uconfig.UConfig, configpath string) []string {
|
||||
result := []string{}
|
||||
for _, i := range config.GetPaths(configpath) {
|
||||
if s := config.GetString(i, ""); s == "" {
|
||||
continue
|
||||
} else {
|
||||
result = append(result, s)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
|
@ -1,89 +0,0 @@
|
|||
package bslab
|
||||
|
||||
import "sync/atomic"
|
||||
|
||||
// slab tracks one pool of recycled byte buffers plus usage counters
// (the counters are updated with sync/atomic by Get/Put).
type slab struct {
	queue                 chan []byte // recycled buffers; nil for the catch-all slabs[0]
	get, put, alloc, lost int64       // counters, always accessed via sync/atomic
}

// slabs maps a power-of-two capacity to its pool; slabs[0] aggregates
// requests falling outside the pooled size range.
var (
	slabs = map[int]*slab{}
)

// init registers pools for capacities 2^9 (512B) through 2^26 (64MB),
// each retaining up to 1024 recycled buffers.
func init() {
	slabs[0] = &slab{}
	for size := uint(9); size <= 26; size++ {
		slabs[1<<size] = &slab{queue: make(chan []byte, 1024)}
	}
}

// Stats returns a snapshot of every pool as [get, put, alloc, lost, queued].
// Counters are now read with atomic loads: Get/Put update them with atomic
// adds from other goroutines, so the previous plain reads were a data race.
func Stats() (info map[int][5]int64) {
	info = map[int][5]int64{}
	for size, slab := range slabs {
		info[size] = [5]int64{
			atomic.LoadInt64(&slab.get),
			atomic.LoadInt64(&slab.put),
			atomic.LoadInt64(&slab.alloc),
			atomic.LoadInt64(&slab.lost),
			int64(len(slab.queue)),
		}
	}
	return info
}
|
||||
|
||||
// Get returns a zero-length buffer with capacity at least size, reusing a
// pooled buffer when one is available. A non-nil item is reused directly if
// large enough, otherwise recycled via Put. Requests are rounded up to the
// next power of two; sizes without a pool are freshly allocated and counted
// against slabs[0].
func Get(size int, item []byte) []byte {
	if size <= 0 {
		return nil
	}
	if item != nil {
		if cap(item) >= size {
			return item[:0]
		}
		// too small for this request: hand it back to its pool first
		Put(item)
	}
	// round size up to the next power of two: count the significant bits,
	// subtracting one shift when size already is an exact power of two
	bits, power := uint(0), uint(0)
	if size&(size-1) == 0 {
		power = 1
	}
	for size != 0 {
		size >>= 1
		bits++
	}
	size = 1 << (bits - power)
	if slab, ok := slabs[size]; ok {
		atomic.AddInt64(&(slab.get), 1)
		select {
		case item := <-slab.queue:
			// reuse a pooled buffer, reset to zero length
			return item[:0]
		default:
			// pool empty: allocate and count it
			atomic.AddInt64(&(slab.alloc), 1)
			return make([]byte, 0, size)
		}
	}
	// out-of-range size: tracked (in bytes for alloc) on the catch-all slab
	atomic.AddInt64(&(slabs[0].get), 1)
	atomic.AddInt64(&(slabs[0].alloc), int64(size))
	return make([]byte, 0, size)
}
|
||||
|
||||
// Put returns a buffer to its pool. The pool is chosen by rounding the
// buffer's capacity DOWN to a power of two; buffers more than 20% larger
// than that pool size, or outside the pooled range, are dropped and counted
// as lost (in bytes) on the catch-all slabs[0].
func Put(item []byte) {
	if item == nil || cap(item) <= 0 {
		return
	}
	// round cap(item) down to a power of two
	size, bits := cap(item), uint(0)
	for size != 0 {
		size >>= 1
		bits++
	}
	size = 1 << (bits - 1)
	// only recycle when the capacity is within 20% above the pool size
	if size > 0 && float64(cap(item))/float64(size) <= 1.2 {
		if slab, ok := slabs[size]; ok {
			atomic.AddInt64(&(slab.put), 1)
			select {
			case slab.queue <- item:
			default:
				// pool full: drop the buffer
				atomic.AddInt64(&(slab.lost), 1)
			}
		} else {
			atomic.AddInt64(&(slabs[0].put), 1)
			atomic.AddInt64(&(slabs[0].lost), int64(cap(item)))
		}
	} else {
		atomic.AddInt64(&(slabs[0].put), 1)
		atomic.AddInt64(&(slabs[0].lost), int64(cap(item)))
	}
}
|
|
@ -1,356 +0,0 @@
|
|||
package chash
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	CHASH_MAGIC    uint32 = 0x48414843 // "CHAH" in little-endian byte order, serialization header magic
	CHASH_REPLICAS        = 128        // default and maximum virtual nodes per unit of weight
)

// item is one virtual node on the ring: a hash position and the index of
// the target it maps to.
type item struct {
	hash   uint32
	target uint16
}

// CHash is a consistent-hash ring. Mutators mark it un-frozen; the ring is
// (re)built lazily by freeze() before lookups or serialization.
type CHash struct {
	targets  map[string]uint8 // target name -> weight (1..100)
	names    []string         // target index -> name, rebuilt by freeze()
	ring     []item           // virtual nodes, sorted by hash
	ringSize uint32           // total virtual nodes on the ring
	replicas uint8            // virtual nodes per unit of weight
	frozen   bool             // true when ring/names reflect targets
	sync.RWMutex
}

// cores caches runtime.NumCPU() (set lazily in New()); locking is skipped
// entirely on single-core systems.
var cores int

func init() {
	// seed math/rand for LookupBalance's random target selection
	rand.Seed(time.Now().UnixNano() + int64(os.Getpid()))
}
|
||||
|
||||
func mmhash2(key []byte, keySize int) uint32 {
|
||||
var magic, hash, current, value uint32 = 0x5bd1e995, uint32(0x4d4d4832 ^ keySize), 0, 0
|
||||
|
||||
if keySize < 0 {
|
||||
keySize = len(key)
|
||||
}
|
||||
for keySize >= 4 {
|
||||
value = uint32(key[current]) | uint32(key[current+1])<<8 |
|
||||
uint32(key[current+2])<<16 | uint32(key[current+3])<<24
|
||||
value *= magic
|
||||
value ^= value >> 24
|
||||
value *= magic
|
||||
hash *= magic
|
||||
hash ^= value
|
||||
current += 4
|
||||
keySize -= 4
|
||||
}
|
||||
if keySize >= 3 {
|
||||
hash ^= uint32(key[current+2]) << 16
|
||||
}
|
||||
if keySize >= 2 {
|
||||
hash ^= uint32(key[current+1]) << 8
|
||||
}
|
||||
if keySize >= 1 {
|
||||
hash ^= uint32(key[current])
|
||||
}
|
||||
if keySize != 0 {
|
||||
hash *= magic
|
||||
}
|
||||
hash ^= hash >> 13
|
||||
hash *= magic
|
||||
hash ^= hash >> 15
|
||||
return hash
|
||||
}
|
||||
|
||||
// ByHash orders ring items by ascending hash value (sort.Interface),
// used by freeze() to make the ring binary-searchable.
type ByHash []item

func (a ByHash) Len() int           { return len(a) }
func (a ByHash) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByHash) Less(i, j int) bool { return a[i].hash < a[j].hash }
|
||||
|
||||
// freeze rebuilds names and the sorted ring from targets, creating
// weight*replicas virtual nodes per target. It is a no-op when already
// frozen; locking is skipped on single-core systems.
func (this *CHash) freeze() {
	if cores > 1 {
		this.Lock()
		defer this.Unlock()
	}
	if this.frozen {
		return
	}
	// total virtual nodes = sum(weight) * replicas
	this.ringSize = 0
	for _, tweight := range this.targets {
		this.ringSize += uint32(tweight) * uint32(this.replicas)
	}
	if this.ringSize == 0 {
		this.frozen = true
		return
	}

	var (
		target uint16 = 0
		offset uint32 = 0
		key    []byte = make([]byte, 128) // scratch buffer, reused per virtual node
	)
	this.names = make([]string, len(this.targets))
	this.ring = make([]item, this.ringSize)
	for tname, tweight := range this.targets {
		this.names[target] = tname
		for weight := uint8(0); weight < tweight; weight++ {
			for replica := uint8(0); replica < this.replicas; replica++ {
				// virtual-node key = name + weight index + replica index
				key = append(key[:0], tname...)
				key = strconv.AppendInt(key, int64(weight), 10)
				key = strconv.AppendInt(key, int64(replica), 10)
				this.ring[offset] = item{mmhash2(key, -1), target}
				offset++
			}
		}
		target++
	}
	// sorted ring enables the bisection in Lookup
	sort.Sort(ByHash(this.ring))
	this.frozen = true
}
|
||||
|
||||
func New(replicas ...uint8) *CHash {
|
||||
if cores == 0 {
|
||||
cores = runtime.NumCPU()
|
||||
}
|
||||
chash := &CHash{
|
||||
targets: make(map[string]uint8),
|
||||
names: nil,
|
||||
ring: nil,
|
||||
ringSize: 0,
|
||||
replicas: CHASH_REPLICAS,
|
||||
frozen: false,
|
||||
}
|
||||
if len(replicas) > 0 {
|
||||
chash.replicas = replicas[0]
|
||||
}
|
||||
if chash.replicas < 1 {
|
||||
chash.replicas = 1
|
||||
}
|
||||
if chash.replicas > CHASH_REPLICAS {
|
||||
chash.replicas = CHASH_REPLICAS
|
||||
}
|
||||
return chash
|
||||
}
|
||||
|
||||
// AddTarget registers or re-weights a target (weight 1..100, name up to
// 128 bytes) and marks the ring for a rebuild. Returns false when the
// input is out of range or the weight is unchanged.
// NOTE(review): the this.targets[name] read happens before the lock is
// taken — racy against concurrent mutators; confirm intended.
func (this *CHash) AddTarget(name string, weight uint8) bool {
	if weight > 0 && weight <= 100 && len(name) <= 128 && this.targets[name] != weight {
		if cores > 1 {
			this.Lock()
			defer this.Unlock()
		}
		this.targets[name] = weight
		this.frozen = false
		return true
	}
	return false
}
|
||||
// RemoveTarget deletes a target and marks the ring for a rebuild.
// It always returns true, even when the name was not registered.
func (this *CHash) RemoveTarget(name string) bool {
	if cores > 1 {
		this.Lock()
		defer this.Unlock()
	}
	delete(this.targets, name)
	this.frozen = false
	return true
}
|
||||
// ClearTargets removes every target and marks the ring for a rebuild.
// It always returns true.
func (this *CHash) ClearTargets() bool {
	if cores > 1 {
		this.Lock()
		defer this.Unlock()
	}
	this.targets = make(map[string]uint8)
	this.frozen = false
	return true
}
|
||||
|
||||
// Serialize freezes the ring and encodes it little-endian as:
//   magic(4) totalSize(4) replicas(1) nameCount(2) ringSize(4)
//   per target: weight(1) nameLen(1) name(nameLen)
//   per ring item: hash(4) targetIndex(2)
func (this *CHash) Serialize() []byte {
	this.freeze()
	if cores > 1 {
		this.RLock()
		defer this.RUnlock()
	}
	// fixed 15-byte header + variable name table + 6 bytes per ring item
	size := uint32(4) + 4 + 1 + 2 + 4
	for _, name := range this.names {
		size += 1 + 1 + uint32(len(name))
	}
	size += (this.ringSize * 6)
	serialized := make([]byte, size)
	offset := uint32(0)
	serialized[offset] = byte(CHASH_MAGIC & 0xff)
	serialized[offset+1] = byte((CHASH_MAGIC >> 8) & 0xff)
	serialized[offset+2] = byte((CHASH_MAGIC >> 16) & 0xff)
	serialized[offset+3] = byte((CHASH_MAGIC >> 24) & 0xff)
	serialized[offset+4] = byte(size & 0xff)
	serialized[offset+5] = byte((size >> 8) & 0xff)
	serialized[offset+6] = byte((size >> 16) & 0xff)
	serialized[offset+7] = byte((size >> 24) & 0xff)
	serialized[offset+8] = this.replicas
	serialized[offset+9] = byte(uint16(len(this.names)) & 0xff)
	serialized[offset+10] = byte(((uint16(len(this.names))) >> 8) & 0xff)
	serialized[offset+11] = byte(this.ringSize & 0xff)
	serialized[offset+12] = byte((this.ringSize >> 8) & 0xff)
	serialized[offset+13] = byte((this.ringSize >> 16) & 0xff)
	serialized[offset+14] = byte((this.ringSize >> 24) & 0xff)
	offset += 15
	// name table: weight, length-prefixed name
	for _, name := range this.names {
		serialized[offset] = this.targets[name]
		serialized[offset+1] = byte(len(name) & 0xff)
		copy(serialized[offset+2:offset+2+uint32(serialized[offset+1])], []byte(name))
		offset += 2 + uint32(serialized[offset+1])
	}
	// ring items: 4-byte hash + 2-byte target index
	for _, item := range this.ring {
		serialized[offset] = byte(item.hash & 0xff)
		serialized[offset+1] = byte((item.hash >> 8) & 0xff)
		serialized[offset+2] = byte((item.hash >> 16) & 0xff)
		serialized[offset+3] = byte((item.hash >> 24) & 0xff)
		serialized[offset+4] = byte(item.target & 0xff)
		serialized[offset+5] = byte((item.target >> 8) & 0xff)
		offset += 6
	}
	return serialized
}
|
||||
// FileSerialize writes the serialized ring to path (mode 0644, truncating
// any previous content) and reports success.
func (this *CHash) FileSerialize(path string) bool {
	handle, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		return false
	}
	defer handle.Close()
	if _, err := handle.Write(this.Serialize()); err != nil {
		return false
	}
	return true
}
|
||||
|
||||
// Unserialize restores a ring from the Serialize() byte layout, replacing
// the receiver's state and marking it frozen. Returns false when the
// header, magic, declared size, or section lengths are inconsistent.
// NOTE(review): the name-table loop indexes offset+2+len without first
// checking it stays within size — crafted/corrupt input could panic before
// the final offset != size check; confirm inputs are trusted.
func (this *CHash) Unserialize(serialized []byte) bool {
	if cores > 1 {
		this.Lock()
		defer this.Unlock()
	}
	if len(serialized) < 15 {
		return false
	}
	// little-endian header fields (see Serialize for the layout)
	magic := uint32(serialized[0]) + (uint32(serialized[1]) << 8) + (uint32(serialized[2]) << 16) + (uint32(serialized[3]) << 24)
	size := uint32(serialized[4]) + (uint32(serialized[5]) << 8) + (uint32(serialized[6]) << 16) + (uint32(serialized[7]) << 24)
	replicas := serialized[8]
	names := uint16(serialized[9]) + (uint16(serialized[10]) << 8)
	ringSize := uint32(serialized[11]) + (uint32(serialized[12]) << 8) + (uint32(serialized[13]) << 16) + (uint32(serialized[14]) << 24)
	if magic != CHASH_MAGIC || size != uint32(len(serialized)) {
		return false
	}
	this.targets = make(map[string]uint8)
	this.names = make([]string, names)
	this.ring = make([]item, ringSize)
	this.ringSize = ringSize
	this.replicas = replicas
	offset := uint32(15)
	// name table: weight(1) nameLen(1) name
	for index := uint16(0); index < names && offset < size; index++ {
		len := uint32(serialized[offset+1])
		this.names[index] = string(serialized[offset+2 : offset+2+len])
		this.targets[this.names[index]] = serialized[offset]
		offset += 2 + len
	}
	if offset > size {
		return false
	}
	// ring items: hash(4) targetIndex(2)
	for item := uint32(0); item < ringSize && offset < size; item++ {
		this.ring[item].hash = uint32(serialized[offset]) + (uint32(serialized[offset+1]) << 8) + (uint32(serialized[offset+2]) << 16) + (uint32(serialized[offset+3]) << 24)
		this.ring[item].target = uint16(serialized[offset+4]) + (uint16(serialized[offset+5]) << 8)
		offset += 6
	}
	// every byte must have been consumed exactly
	if offset != size {
		return false
	}
	this.frozen = true
	return true
}
|
||||
// FileUnserialize loads a serialized ring from path (capped at 128MB)
// and restores it via Unserialize, reporting success.
// NOTE(review): a single handle.Read may legitimately return fewer bytes
// than the file size without an error, which would make this fail —
// io.ReadFull would be more robust; confirm file sizes stay small.
func (this *CHash) FileUnserialize(path string) bool {
	handle, err := os.OpenFile(path, os.O_RDONLY, 0)
	if err != nil {
		return false
	}
	defer handle.Close()
	info, err := handle.Stat()
	if err != nil {
		return false
	}
	// refuse implausibly large inputs
	if info.Size() > 128*1024*1024 {
		return false
	}
	serialized := make([]byte, info.Size())
	read, err := handle.Read(serialized)
	if int64(read) != info.Size() || err != nil {
		return false
	}
	return this.Unserialize(serialized)
}
|
||||
|
||||
// Lookup returns up to count distinct target names for candidate, walking
// the ring clockwise from the virtual node owning the candidate's hash.
// count is clamped to the number of targets; an empty ring or count < 1
// yields an empty slice.
func (this *CHash) Lookup(candidate string, count int) []string {
	var start uint32 = 0

	this.freeze()
	if cores > 1 {
		this.RLock()
		defer this.RUnlock()
	}
	if count > len(this.targets) {
		count = len(this.targets)
	}
	if this.ringSize == 0 || count < 1 {
		return []string{}
	}
	hash := mmhash2([]byte(candidate), -1)
	// bisect the sorted ring for the slot whose (prev, this] interval holds
	// hash; hashes outside the ring's range wrap around to slot 0
	if hash > this.ring[0].hash && hash <= this.ring[this.ringSize-1].hash {
		start = this.ringSize / 2
		span := start / 2
		for {
			if hash > this.ring[start].hash && hash <= this.ring[start+1].hash {
				break
			}
			if hash > this.ring[start].hash {
				start += span
			} else {
				start -= span
			}
			span /= 2
			if span < 1 {
				span = 1 // keep stepping even when the span bottoms out
			}
		}
	}
	// walk clockwise collecting distinct target names, wrapping at the end
	result := make([]string, count)
	rank := 0
	for rank < count {
		index := 0
		for index = 0; index < rank; index++ {
			if result[index] == this.names[this.ring[start].target] {
				break
			}
		}
		if index >= rank {
			result[rank] = this.names[this.ring[start].target]
			rank++
		}
		start++
		if start >= this.ringSize {
			start = 0
		}
	}
	return result
}
|
||||
func (this *CHash) LookupBalance(candidate string, count int) string {
|
||||
result := this.Lookup(candidate, count)
|
||||
if len(result) > 0 {
|
||||
return result[rand.Intn(len(result))]
|
||||
}
|
||||
return ""
|
||||
}
|
|
@ -1,54 +0,0 @@
|
|||
package dynacert
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DYNACERT serves a TLS certificate that is reloaded from disk when the
// underlying files change, for use as a tls.Config GetCertificate callback.
type DYNACERT struct {
	Public, Key    string           // paths to the PEM certificate and private key
	Certificate    *tls.Certificate // currently loaded certificate (nil until first load)
	Last, Modified time.Time        // last stat-check time / mtime of the loaded cert
	sync.RWMutex
}

// cores caches runtime.NumCPU(); locking is skipped on single-core systems.
var cores int
|
||||
|
||||
// GetCertificate implements the tls.Config GetCertificate callback. At most
// every 10 seconds it stats the certificate/key files and reloads them when
// the certificate file's mtime changed (or nothing is loaded yet).
// NOTE(review): this.Last and the initial this.Certificate reads/writes
// happen outside the lock — racy under concurrent handshakes on multi-core
// systems; confirm acceptable.
func (this *DYNACERT) GetCertificate(*tls.ClientHelloInfo) (cert *tls.Certificate, err error) {
	var info os.FileInfo

	if cores == 0 {
		cores = runtime.NumCPU()
	}
	// re-check the files at most once per 10s
	if this.Certificate == nil || time.Now().Sub(this.Last) >= 10*time.Second {
		this.Last = time.Now()
		if info, err = os.Stat(this.Public); err != nil {
			return nil, err
		}
		if _, err = os.Stat(this.Key); err != nil {
			return nil, err
		}
		// reload when never loaded or the cert file's mtime changed
		if this.Certificate == nil || info.ModTime().Sub(this.Modified) != 0 {
			if certificate, err := tls.LoadX509KeyPair(this.Public, this.Key); err != nil {
				return nil, err
			} else {
				if cores > 1 {
					this.Lock()
				}
				this.Modified = info.ModTime()
				this.Certificate = &certificate
				if cores > 1 {
					this.Unlock()
				}
			}
		}
	}
	if cores > 1 {
		this.RLock()
		defer this.RUnlock()
	}
	return this.Certificate, nil
}
|
|
@ -1,28 +0,0 @@
|
|||
package fqdn
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FQDN resolves the machine's fully-qualified domain name by reverse-looking
// up each address of the local hostname until one yields a name with at
// least two dots. It returns (fqdn, firstAddress), falling back to
// (hostname, "*") when reverse resolution fails and ("unknown", "*") when
// even the hostname cannot be determined.
// NOTE(review): addresses[0] after the final LookupHost is indexed without
// checking the slice is non-empty — could panic on a pathological resolver.
func FQDN() (string, string) {
	if hostname, err := os.Hostname(); err == nil {
		if addresses, err := net.LookupHost(hostname); err != nil {
			return hostname, "*"
		} else {
			for _, address := range addresses {
				if hostnames, err := net.LookupAddr(address); err == nil && len(hostnames) > 0 {
					for _, hostname := range hostnames {
						// require a dotted FQDN, not a bare or single-label name
						if strings.Count(hostname, ".") > 1 {
							hostname = strings.TrimSuffix(hostname, ".")
							addresses, _ = net.LookupHost(hostname)
							return hostname, addresses[0]
						}
					}
				}
			}
		}
	}
	return "unknown", "*"
}
|
|
@ -1,60 +0,0 @@
|
|||
package listener
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// TCPListener wraps net.TCPListener to apply keepalive and socket buffer
// tuning to every accepted connection.
type TCPListener struct {
	*net.TCPListener
	ReadBufferSize  int // SO_RCVBUF applied when > 0
	WriteBufferSize int // SO_SNDBUF applied when > 0
}
|
||||
|
||||
// Accept accepts a TCP connection and enables keepalives on it
// (60s idle, 10s probe interval, 3 probes) before applying the optional
// read/write buffer sizes. Setsockopt errors are deliberately ignored:
// tuning failures should not reject the connection.
func (this *TCPListener) Accept() (net.Conn, error) {
	connection, err := this.AcceptTCP()
	if err != nil {
		return nil, err
	}
	if rconnection, err := connection.SyscallConn(); err == nil {
		rconnection.Control(
			func(handle uintptr) {
				syscall.SetsockoptInt(int(handle), syscall.SOL_SOCKET, syscall.SO_KEEPALIVE, 1)
				syscall.SetsockoptInt(int(handle), syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, 60)
				syscall.SetsockoptInt(int(handle), syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, 10)
				syscall.SetsockoptInt(int(handle), syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT, 3)
			})
	}
	if this.ReadBufferSize > 0 {
		connection.SetReadBuffer(this.ReadBufferSize)
	}
	if this.WriteBufferSize > 0 {
		connection.SetWriteBuffer(this.WriteBufferSize)
	}
	return connection, nil
}
|
||||
|
||||
// NewTCPListener creates a TCP listener with SO_REUSEADDR always set and
// SO_REUSEPORT optionally set before bind (via ListenConfig.Control), and
// records the read/write buffer sizes applied to accepted connections.
func NewTCPListener(network, address string, reuseport bool, read, write int) (listener *TCPListener, err error) {
	config := net.ListenConfig{
		Control: func(network, address string, connection syscall.RawConn) error {
			connection.Control(func(handle uintptr) {
				if err := syscall.SetsockoptInt(int(handle), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1); err != nil {
					return
				}
				if reuseport {
					if err := syscall.SetsockoptInt(int(handle), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil {
						return
					}
				}
			})
			return nil
		}}
	if clistener, err := config.Listen(context.Background(), network, address); err != nil {
		return nil, err
	} else {
		// config.Listen returns a *net.TCPListener for tcp networks
		return &TCPListener{clistener.(*net.TCPListener), read, write}, nil
	}
}
|
|
@ -1,34 +0,0 @@
|
|||
PFDB binary layout
|
||||
------------------
|
||||
"PFDB"
|
||||
VVVV (4)
|
||||
HHHHHHHHHHHHHHHH (16)
|
||||
"DESC"
|
||||
"DDDDDDDDDDDDDDDDDDDD" (20)
|
||||
"STRS"
|
||||
W (1)
|
||||
SSSS (4)
|
||||
CCCC (4)
|
||||
...
|
||||
"NUMS"
|
||||
SSSS (4)
|
||||
CCCC (4)
|
||||
...
|
||||
"PAIR"
|
||||
SSSS (4)
|
||||
CCCC (4)
|
||||
...
|
||||
"CLUS"
|
||||
W (1)
|
||||
SSSS (4)
|
||||
CCCC (4)
|
||||
...
|
||||
"MAPS"
|
||||
SSSS (4)
|
||||
CCCC (4)
|
||||
...
|
||||
"NODE"
|
||||
W (1)
|
||||
SSSS (4)
|
||||
CCCC (4)
|
||||
...
|
|
@ -1,11 +0,0 @@
|
|||
prefixdb: prefixdb.go
|
||||
@go build prefixdb.go && strip prefixdb
|
||||
|
||||
run: prefixdb
|
||||
@#./prefixdb city city.pfdb@'MMCITY 20190402' GeoIP2-City-Locations-en.csv GeoIP2-City-Blocks-IPv4.csv GeoIP2-City-Blocks-IPv6.csv
|
||||
@#./prefixdb asn asn.pfdb@'MMASN 20190402' GeoLite2-ASN-Blocks-IPv4.csv GeoLite2-ASN-Blocks-IPv6.csv
|
||||
@./prefixdb lookup city.pfdb asn.pfdb 78.193.67.63 188.65.124.26
|
||||
@#./prefixdb server *:8000 city.pfdb asn.pfdb
|
||||
|
||||
clean:
|
||||
@rm -f prefixdb *.pfdb
|
|
@ -1,5 +0,0 @@
|
|||
module .
|
||||
|
||||
go 1.12
|
||||
|
||||
require github.com/pyke369/golang-support v0.0.0-20190428173758-fae1fcd33c43 // indirect
|
|
@ -1,2 +0,0 @@
|
|||
github.com/pyke369/golang-support v0.0.0-20190428173758-fae1fcd33c43 h1:638A4GSCbTc/Z8N1TyymmC8iWQOE3BWnWJv9fzZeHJc=
|
||||
github.com/pyke369/golang-support v0.0.0-20190428173758-fae1fcd33c43/go.mod h1:0XGrzgrEp0fa/+JSV8XZePUwyjnU6C3bMc7Xz2bHHKI=
|
|
@ -1,367 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pyke369/golang-support/prefixdb"
|
||||
)
|
||||
|
||||
// LOCATION holds the geographic fields parsed from one row of the
// GeoIP2 city locations CSV, keyed by the row's numeric geoname id.
type LOCATION struct {
	ContinentCode string
	ContinentName string
	CountryCode   string
	CountryName   string
	RegionCode    string
	RegionName    string
	StateCode     string
	StateName     string
	CityName      string
	TimeZone      string
	InEurope      bool
}

var (
	// csvMatcher splits a CSV line while honoring double-quoted fields
	csvMatcher = regexp.MustCompile(`(?:,|\n|^)("(?:(?:"")*[^"]*)*"|[^",\n]*|(?:\n|$))`)
	// jsonMatcher splits "prefix {json}" input lines (json part optional)
	jsonMatcher = regexp.MustCompile(`^(\S+)(?:\s(\{.+?\}))?$`)
	// pfdb is the prefix database being built by the mk* commands
	pfdb = prefixdb.New()
)
|
||||
|
||||
// size renders a byte count as a human-readable string: kB below one
// megabyte, MB from one megabyte up, one decimal place either way.
func size(input int) string {
	value := float64(input)
	if input < 1024*1024 {
		return fmt.Sprintf("%.1fkB", value/1024)
	}
	return fmt.Sprintf("%.1fMB", value/(1024*1024))
}
|
||||
|
||||
// mkjson builds a prefix database from "CIDR {json}" text files given in
// os.Args[3:], then saves it to os.Args[2] (an optional "@description"
// suffix on the output path becomes the database description). Progress is
// reported on stderr at most every 250ms.
func mkjson() {
	for index := 3; index < len(os.Args); index++ {
		count := 0
		if handle, err := os.Open(os.Args[index]); err == nil {
			reader := bufio.NewReader(handle)
			last := time.Now()
			start := last
			for {
				if line, err := reader.ReadString('\n'); err != nil {
					break
				} else {
					// each line: "<cidr> <optional json object>"
					if fields := jsonMatcher.FindStringSubmatch(strings.TrimSpace(line)); fields != nil {
						if _, prefix, err := net.ParseCIDR(fields[1]); err == nil {
							data := map[string]interface{}{}
							json.Unmarshal([]byte(fields[2]), &data)
							pfdb.Add(*prefix, data, [][]string{[]string{"key1", "key2"}})
							count++
						}
					}
				}
				// throttled progress output
				if now := time.Now(); now.Sub(last) >= 250*time.Millisecond {
					last = now
					fmt.Fprintf(os.Stderr, "\r- adding prefixes [%s] %d", os.Args[index], count)
				}
			}
			handle.Close()
			fmt.Fprintf(os.Stderr, "\r- added prefixes [%s] (%.3fs - %d entries)\n", os.Args[index], float64(time.Now().Sub(start))/float64(time.Second), count)
		}
	}
	start := time.Now()
	description := ""
	// split an optional "@description" suffix off the output path
	if index := strings.Index(os.Args[2], "@"); index > 0 {
		description = os.Args[2][index+1:]
		os.Args[2] = os.Args[2][:index]
	}
	fmt.Fprintf(os.Stderr, "\r- saving database [%s]... ", os.Args[2])
	if _, err := pfdb.Save(os.Args[2], description); err != nil {
		fmt.Fprintf(os.Stderr, "\r- saving database [%s] failed (%v)\n", os.Args[2], err)
	} else {
		fmt.Fprintf(os.Stderr, "\r- saved database [%s] (%.3fs - total[%s] strings[%s] numbers[%s] pairs[%s] clusters[%s] maps[%s] nodes[%s])\n",
			os.Args[2], float64(time.Now().Sub(start))/float64(time.Second), size(pfdb.Total), size(pfdb.Strings[0]),
			size(pfdb.Numbers[0]), size(pfdb.Pairs[0]), size(pfdb.Clusters[0]), size(pfdb.Maps[0]), size(pfdb.Nodes[0]),
		)
	}
}
|
||||
|
||||
// mkcity builds a city-level prefix database: os.Args[3] is the GeoIP2
// locations CSV (loaded into a geoname-id map), os.Args[4:] are the block
// CSVs whose prefixes are added with the looked-up location data, and the
// result is saved to os.Args[2] (optional "@description" suffix).
func mkcity() {
	locations := map[int]*LOCATION{}
	if handle, err := os.Open(os.Args[3]); err == nil {
		reader := bufio.NewReader(handle)
		last := time.Now()
		start := last
		for {
			if line, err := reader.ReadString('\n'); err != nil {
				break
			} else {
				// a valid locations row has exactly 14 CSV fields
				if fields := csvMatcher.FindAllStringSubmatch(strings.TrimSpace(line), -1); len(fields) == 14 {
					for index := 0; index < len(fields); index++ {
						fields[index][1] = strings.Trim(fields[index][1], `"`)
					}
					if id, err := strconv.Atoi(fields[0][1]); err == nil {
						locations[id] = &LOCATION{
							ContinentCode: fields[2][1],
							ContinentName: fields[3][1],
							CountryCode:   fields[4][1],
							CountryName:   fields[5][1],
							RegionCode:    fields[6][1],
							RegionName:    fields[7][1],
							StateCode:     fields[8][1],
							StateName:     fields[9][1],
							CityName:      fields[10][1],
							TimeZone:      fields[12][1],
							InEurope:      fields[13][1] == "1",
						}
					}
				}
			}
			// throttled progress output
			if now := time.Now(); now.Sub(last) >= 250*time.Millisecond {
				last = now
				fmt.Fprintf(os.Stderr, "\r- loading locations [%s] %d", os.Args[3], len(locations))
			}
		}
		handle.Close()
		fmt.Fprintf(os.Stderr, "\r- loaded locations [%s] (%.3fs - %d entries)\n", os.Args[3], float64(time.Now().Sub(start))/float64(time.Second), len(locations))
	}

	// field clustering hints passed to pfdb.Add for compact storage
	clusters := [][]string{
		[]string{"continent_code", "continent_name", "country_code", "country_name", "region_code", "region_name", "state_code", "state_name", "timezone", "in_europe"},
		[]string{"city_name", "postal_code", "latitude", "longitude"},
	}
	for index := 4; index < len(os.Args); index++ {
		count := 0
		if handle, err := os.Open(os.Args[index]); err == nil {
			reader := bufio.NewReader(handle)
			last := time.Now()
			start := last
			for {
				if line, err := reader.ReadString('\n'); err != nil {
					break
				} else {
					if fields := strings.Split(strings.TrimSpace(line), ","); len(fields) == 10 {
						// geoname id: prefer field 1, fall back to field 2
						id := 0
						if id, _ = strconv.Atoi(fields[1]); id == 0 {
							id, _ = strconv.Atoi(fields[2])
						}
						if id != 0 && locations[id] != nil {
							if _, prefix, err := net.ParseCIDR(fields[0]); err == nil {
								latitude, _ := strconv.ParseFloat(fields[7], 64)
								longitude, _ := strconv.ParseFloat(fields[8], 64)
								pfdb.Add(*prefix, map[string]interface{}{
									"continent_code": locations[id].ContinentCode,
									"continent_name": locations[id].ContinentName,
									"country_code":   locations[id].CountryCode,
									"country_name":   locations[id].CountryName,
									"region_code":    locations[id].RegionCode,
									"region_name":    locations[id].RegionName,
									"state_code":     locations[id].StateCode,
									"state_name":     locations[id].StateName,
									"city_name":      locations[id].CityName,
									"in_europe":      locations[id].InEurope,
									"timezone":       locations[id].TimeZone,
									"postal_code":    fields[6],
									"latitude":       latitude,
									"longitude":      longitude,
								}, clusters)
								count++
							}
						}
					}
				}
				// throttled progress output
				if now := time.Now(); now.Sub(last) >= 250*time.Millisecond {
					last = now
					fmt.Fprintf(os.Stderr, "\r- adding prefixes [%s] %d", os.Args[index], count)
				}
			}
			handle.Close()
			fmt.Fprintf(os.Stderr, "\r- added prefixes [%s] (%.3fs - %d entries)\n", os.Args[index], float64(time.Now().Sub(start))/float64(time.Second), count)
		}
	}

	start := time.Now()
	description := ""
	// split an optional "@description" suffix off the output path
	if index := strings.Index(os.Args[2], "@"); index > 0 {
		description = os.Args[2][index+1:]
		os.Args[2] = os.Args[2][:index]
	}
	fmt.Fprintf(os.Stderr, "\r- saving database [%s]... ", os.Args[2])
	if _, err := pfdb.Save(os.Args[2], description); err != nil {
		fmt.Fprintf(os.Stderr, "\r- saving database [%s] failed (%v)\n", os.Args[2], err)
	} else {
		fmt.Fprintf(os.Stderr, "\r- saved database [%s] (%.3fs - total[%s] strings[%s] numbers[%s] pairs[%s] clusters[%s] maps[%s] nodes[%s])\n",
			os.Args[2], float64(time.Now().Sub(start))/float64(time.Second), size(pfdb.Total), size(pfdb.Strings[0]),
			size(pfdb.Numbers[0]), size(pfdb.Pairs[0]), size(pfdb.Clusters[0]), size(pfdb.Maps[0]), size(pfdb.Nodes[0]),
		)
	}
}
|
||||
|
||||
// mkasn builds the prefix database from MaxMind GeoLite2 ASN CSV dumps
// (os.Args[3:]) and saves it to the target path held in os.Args[2],
// which may carry an optional "@<description>" suffix.
func mkasn() {
	for index := 3; index < len(os.Args); index++ {
		count := 0
		if handle, err := os.Open(os.Args[index]); err == nil {
			reader := bufio.NewReader(handle)
			last := time.Now()
			start := last
			for {
				if line, err := reader.ReadString('\n'); err != nil {
					break
				} else {
					// expected CSV layout: network,asnum,asname (3 quoted fields)
					if fields := csvMatcher.FindAllStringSubmatch(strings.TrimSpace(line), -1); len(fields) == 3 {
						for index := 0; index < len(fields); index++ {
							fields[index][1] = strings.Trim(fields[index][1], `"`)
						}
						// skip records without a usable AS number or CIDR prefix
						if asnum, _ := strconv.Atoi(fields[1][1]); asnum != 0 {
							if _, prefix, err := net.ParseCIDR(fields[0][1]); err == nil {
								pfdb.Add(*prefix, map[string]interface{}{
									"as_number": fmt.Sprintf("AS%d", asnum),
									"as_name":   fields[2][1],
								}, nil)
								count++
							}
						}
					}
				}
				// refresh the progress line at most every 250ms
				if now := time.Now(); now.Sub(last) >= 250*time.Millisecond {
					last = now
					fmt.Fprintf(os.Stderr, "\r- adding prefixes [%s] %d", os.Args[index], count)
				}
			}
			handle.Close()
			fmt.Fprintf(os.Stderr, "\r- added prefixes [%s] (%.3fs - %d entries)\n", os.Args[index], float64(time.Now().Sub(start))/float64(time.Second), count)
		}
	}

	// split an optional "@<description>" suffix off the target path
	start := time.Now()
	description := ""
	if index := strings.Index(os.Args[2], "@"); index > 0 {
		description = os.Args[2][index+1:]
		os.Args[2] = os.Args[2][:index]
	}
	fmt.Fprintf(os.Stderr, "\r- saving database [%s]... ", os.Args[2])
	if _, err := pfdb.Save(os.Args[2], description); err != nil {
		fmt.Fprintf(os.Stderr, "\r- saving database [%s] failed (%v)\n", os.Args[2], err)
	} else {
		fmt.Fprintf(os.Stderr, "\r- saved database [%s] (%.3fs - total[%s] strings[%s] numbers[%s] pairs[%s] clusters[%s] maps[%s] nodes[%s])\n",
			os.Args[2], float64(time.Now().Sub(start))/float64(time.Second), size(pfdb.Total), size(pfdb.Strings[0]),
			size(pfdb.Numbers[0]), size(pfdb.Pairs[0]), size(pfdb.Clusters[0]), size(pfdb.Maps[0]), size(pfdb.Nodes[0]),
		)
	}
}
|
||||
|
||||
// lookup walks os.Args[2:]: every ".pfdb" argument is loaded as a
// database, every other argument is parsed as an IP address and looked
// up in all databases loaded so far; the merged result is printed as
// JSON on stdout (diagnostics go to stderr).
func lookup() {
	databases := []*prefixdb.PrefixDB{}
	for index := 2; index < len(os.Args); index++ {
		if strings.HasSuffix(os.Args[index], `.pfdb`) {
			fmt.Fprintf(os.Stderr, "\r- loading database [%s]...", os.Args[index])
			database := prefixdb.New()
			if err := database.Load(os.Args[index]); err == nil {
				fmt.Fprintf(os.Stderr, "\r- loaded database [%s] (total[%s] version[%d.%d.%d] description[%s])\n",
					os.Args[index], size(database.Total), (database.Version>>16)&0xff, (database.Version>>8)&0xff, database.Version&0xff, database.Description)
				databases = append(databases, database)
			} else {
				fmt.Fprintf(os.Stderr, "\r- loading database [%s] failed (%v)\n", os.Args[index], err)
			}
		} else {
			if ip := net.ParseIP(os.Args[index]); ip == nil {
				fmt.Fprintf(os.Stderr, "- lookup [%s] failed (not a valid IP address)", os.Args[index])
			} else {
				fmt.Fprintf(os.Stderr, "- lookup [%s] ", os.Args[index])
				// accumulate the attributes found in every loaded database
				lookup := map[string]interface{}{}
				for _, database := range databases {
					lookup, _ = database.Lookup(ip, lookup)
				}
				data, _ := json.Marshal(lookup)
				fmt.Printf("%s\n", data)
			}
		}
	}
}
|
||||
|
||||
func server() {
|
||||
databases := []*prefixdb.PrefixDB{}
|
||||
for index := 3; index < len(os.Args); index++ {
|
||||
fmt.Fprintf(os.Stderr, "\r- loading database [%s]...", os.Args[index])
|
||||
database := prefixdb.New()
|
||||
if err := database.Load(os.Args[index]); err == nil {
|
||||
fmt.Fprintf(os.Stderr, "\r- loaded database [%s] (total[%s] version[%d.%d.%d] description[%s])\n",
|
||||
os.Args[index], size(database.Total), (database.Version>>16)&0xff, (database.Version>>8)&0xff, database.Version&0xff, database.Description)
|
||||
databases = append(databases, database)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "\r- loading database [%s] failed (%v)\n", os.Args[index], err)
|
||||
}
|
||||
}
|
||||
http.HandleFunc("/", func(response http.ResponseWriter, request *http.Request) {
|
||||
response.Header().Set("Content-Type", "application/json")
|
||||
remote, _, _ := net.SplitHostPort(request.RemoteAddr)
|
||||
if value := request.Header.Get("X-Forwarded-For"); value != "" {
|
||||
remote = strings.Split(value, ",")[0]
|
||||
}
|
||||
parameters := request.URL.Query()
|
||||
if value := parameters.Get("remote"); value != "" {
|
||||
remote = value
|
||||
}
|
||||
lookup := map[string]interface{}{}
|
||||
if ip := net.ParseIP(remote); ip != nil {
|
||||
lookup["ip"] = fmt.Sprintf("%s", ip)
|
||||
for _, database := range databases {
|
||||
lookup, _ = database.Lookup(ip, lookup)
|
||||
}
|
||||
data, _ := json.Marshal(lookup)
|
||||
response.Write(data)
|
||||
}
|
||||
})
|
||||
parts := strings.Split(os.Args[2], ",")
|
||||
server := &http.Server{Addr: strings.TrimLeft(parts[0], "*"), ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second}
|
||||
fmt.Fprintf(os.Stderr, "\r- listening to [%s]\n", os.Args[2])
|
||||
if len(parts) > 1 {
|
||||
server.ListenAndServeTLS(parts[1], parts[2])
|
||||
} else {
|
||||
server.ListenAndServe()
|
||||
}
|
||||
}
|
||||
|
||||
// usage prints the command-line help screen on stderr and terminates
// the process with the provided exit status.
func usage(status int) {
	help := []string{
		"usage: prefixdb <action> [parameters...]\n\n",
		"help                                                               show this help screen\n",
		"json <database[@<description>]> <JSON prefixes>...                 build database from generic JSON-formatted prefixes lists\n",
		"city <database[@<description>]> <CSV locations> <CSV prefixes>...  build database from MaxMind GeoIP2 cities lists\n",
		"asn <database[@<description>]> <CSV prefixes>...                   build database from MaxMind GeoLite2 asnums lists\n",
		"lookup <database>... <address>...                                  lookup entries in databases\n",
		"server <bind address> <database>...                                spawn an HTTP(S) server for entries lookup\n",
	}
	fmt.Fprint(os.Stderr, strings.Join(help, ""))
	os.Exit(status)
}
|
||||
|
||||
func main() {
|
||||
if len(os.Args) < 2 {
|
||||
usage(1)
|
||||
}
|
||||
switch os.Args[1] {
|
||||
case "help":
|
||||
usage(0)
|
||||
case "json":
|
||||
if len(os.Args) < 3 {
|
||||
usage(1)
|
||||
}
|
||||
mkjson()
|
||||
case "city":
|
||||
if len(os.Args) < 5 {
|
||||
usage(1)
|
||||
}
|
||||
mkcity()
|
||||
case "asn":
|
||||
if len(os.Args) < 3 {
|
||||
usage(1)
|
||||
}
|
||||
mkasn()
|
||||
case "lookup":
|
||||
lookup()
|
||||
case "server":
|
||||
server()
|
||||
default:
|
||||
usage(2)
|
||||
}
|
||||
}
|
|
@ -1,827 +0,0 @@
|
|||
package prefixdb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"net"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const VERSION = 0x00010000
|
||||
|
||||
type fame struct {
|
||||
fame int
|
||||
value interface{}
|
||||
}
|
||||
type byfame []*fame
|
||||
|
||||
func (a byfame) Len() int { return len(a) }
|
||||
func (a byfame) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byfame) Less(i, j int) bool { return a[i].fame > a[j].fame }
|
||||
|
||||
// node is an in-memory binary-trie node used while building a database:
// one level per prefix bit, with the encoded key/value pairs attached to
// the node where the prefix ends.
type node struct {
	down     [2]*node // children for the 0 and 1 bit
	up       *node    // parent (nil for the tree root)
	data     []uint64 // encoded pairs / cluster references for this prefix
	offset   int      // byte offset of this node's payload in the maps section
	explored [4]bool  // traversal markers (2 serialization walks x 2 children)
	emitted  bool     // set once the node has been written to the nodes section
	id       int      // serialized node index (0 = not yet assigned)
}

// cluster is a group of pairs shared by many prefixes, deduplicated by
// the MD5 of its packed pairs.
type cluster struct {
	values [3]int // fame / initial index / final index
	pairs  []uint64 // cluster pairs
	data   []byte // reduced cluster pairs
}

// PrefixDB is both the in-memory builder (lowercase dictionaries + tree)
// and the serialized database view (exported section descriptors over
// the raw data buffer).
type PrefixDB struct {
	sync.RWMutex
	tree     node
	strings  map[string]*[3]int // fame / initial index / final index
	numbers  map[float64]*[3]int // fame / initial index / final index
	pairs    map[uint64]*[3]int // fame / initial index / final index
	clusters map[[16]byte]*cluster
	data     []byte // serialized database bytes
	Total    int    // total serialized size in bytes
	Version  uint32 // database format version (major.minor.patch packed)
	Description string
	Strings  [4]int // size / count / offset / strings index width (bytes)
	Numbers  [3]int // size / count / offset
	Pairs    [3]int // size / count / offset
	Clusters [4]int // size / count / offset / clusters index width (bytes)
	Maps     [3]int // size / count / offset
	Nodes    [4]int // size / count / offset / nodes width (bits)
}
|
||||
|
||||
// cores caches runtime.NumCPU(); locking in Add/Save/Load is skipped on
// single-core hosts where no concurrency is possible.
var cores int

// New returns an empty PrefixDB with all builder dictionaries initialized.
func New() *PrefixDB {
	if cores == 0 {
		cores = runtime.NumCPU()
	}
	return &PrefixDB{strings: map[string]*[3]int{}, numbers: map[float64]*[3]int{}, pairs: map[uint64]*[3]int{}, clusters: map[[16]byte]*cluster{}}
}
|
||||
|
||||
// Add inserts a prefix and its attributes into the in-memory trie.
// IPv4 masks are widened to their IPv6-mapped equivalent so all prefixes
// share one 128-bit tree. Attributes listed in clusters are grouped and
// deduplicated as shared clusters; the remaining keys are attached
// directly to the prefix node. Each key/value is interned in the
// strings/numbers dictionaries and packed into a 64-bit "pair":
// the high word holds the key string index (section tag 0x1), the low
// word the value with a section tag in bits 28-31 (1=string, 2=number,
// 3=true, 4=false, 5=unsupported type).
func (this *PrefixDB) Add(prefix net.IPNet, data map[string]interface{}, clusters [][]string) {
	prefix.IP = prefix.IP.To16()
	ones, bits := prefix.Mask.Size()
	if bits == 32 {
		// widen an IPv4 mask by the 96 bits of the ::ffff: mapping
		ones += 96
		prefix.Mask = net.CIDRMask(ones, bits+96)
	}
	if cores > 1 {
		this.Lock()
	}
	// walk/extend the trie one bit at a time down to the prefix length
	pnode := &this.tree
	for bit := 0; bit < ones; bit++ {
		down := 0
		if (prefix.IP[bit/8] & (1 << (7 - (byte(bit) % 8)))) != 0 {
			down = 1
		}
		if pnode.down[down] == nil {
			pnode.down[down] = &node{}
			pnode.down[down].up = pnode
		}
		// a node that gained children loses its own payload (more-specific wins)
		if len(pnode.data) != 0 {
			pnode.data = []uint64{}
		}
		pnode = pnode.down[down]
	}

	// split the attribute keys into the requested clusters plus one
	// trailing group (lkeys) of non-clustered keys
	skeys, ckeys, lkeys := "", [][]string{}, []string{}
	for _, cluster := range clusters {
		skeys += strings.Join(cluster, ` `) + ` `
		ckeys = append(ckeys, cluster)
	}
	for key, _ := range data {
		if strings.Index(skeys, key) < 0 {
			lkeys = append(lkeys, key)
		}
	}
	ckeys = append(ckeys, lkeys)
	for cindex, keys := range ckeys {
		cpairs := []uint64{}
		for _, key := range keys {
			// keys longer than 255 bytes are not representable
			if len(key) > 255 {
				continue
			}
			if value, ok := data[key]; ok {
				// intern the key in the strings dictionary, bumping its fame
				index := 0
				if _, ok := this.strings[key]; !ok {
					index = len(this.strings)
					this.strings[key] = &[3]int{1, index}
				} else {
					index = this.strings[key][1]
					this.strings[key][0]++
				}
				pair := uint64((uint32(index)&0x0fffffff)|0x10000000) << 32
				// intern the value and tag the pair by its dynamic type
				if tvalue, ok := value.(string); ok {
					if len(tvalue) <= 255 {
						index = 0
						if _, ok := this.strings[tvalue]; !ok {
							index = len(this.strings)
							this.strings[tvalue] = &[3]int{1, index}
						} else {
							index = this.strings[tvalue][1]
							this.strings[tvalue][0]++
						}
						pair |= uint64((uint32(index) & 0x0fffffff) | 0x10000000)
					} else {
						pair |= uint64(0x50000000)
					}
				} else if tvalue, ok := value.(float64); ok {
					index = 0
					if _, ok := this.numbers[tvalue]; !ok {
						index = len(this.numbers)
						this.numbers[tvalue] = &[3]int{1, index}
					} else {
						index = this.numbers[tvalue][1]
						this.numbers[tvalue][0]++
					}
					pair |= uint64((uint32(index) & 0x0fffffff) | 0x20000000)
				} else if tvalue, ok := value.(bool); ok {
					if tvalue {
						pair |= uint64(0x30000000)
					} else {
						pair |= uint64(0x40000000)
					}
				} else {
					pair |= uint64(0x50000000)
				}
				// track pair fame for dictionary deduplication at Save time
				if _, ok := this.pairs[pair]; !ok {
					index = len(this.pairs)
					this.pairs[pair] = &[3]int{1, index}
				} else {
					this.pairs[pair][0]++
				}
				if cindex < len(ckeys)-1 {
					cpairs = append(cpairs, pair)
				} else {
					pnode.data = append(pnode.data, pair)
				}
			}
		}
		if len(cpairs) != 0 {
			// deduplicate the cluster by the MD5 of its packed pairs and
			// reference it from the node with a section-7 tagged word
			buffer := make([]byte, len(cpairs)*8)
			for index, value := range cpairs {
				binary.BigEndian.PutUint64(buffer[index*8:], value)
			}
			key := md5.Sum(buffer)
			index := 0
			if _, ok := this.clusters[key]; !ok {
				index = len(this.clusters)
				this.clusters[key] = &cluster{pairs: cpairs, values: [3]int{1, index}}
			} else {
				index = this.clusters[key].values[1]
				this.clusters[key].values[0]++
			}
			pnode.data = append(pnode.data, 0x7000000000000000|((uint64(index)<<32)&0x0fffffff00000000))
		}
	}
	if cores > 1 {
		this.Unlock()
	}
}
|
||||
|
||||
// wbytes stores value big-endian into the first width bytes of data;
// it is a silent no-op when data is too short to hold it.
func wbytes(width, value int, data []byte) {
	if len(data) < width {
		return
	}
	for position := 0; position < width; position++ {
		shift := uint((width - 1 - position) * 8)
		data[position] = byte(value >> shift)
	}
}
|
||||
// wpbits encodes value under a 4-bit section prefix: values up to 7 fit
// inline in the low 3 bits; larger values set the 0x08 "extended" flag,
// store their byte length in the low 3 bits, and append the value
// big-endian.
func wpbits(prefix byte, value int) []byte {
	if value <= 7 {
		return []byte{prefix | (byte(value) & 0x07)}
	}
	width := int(math.Ceil(math.Ceil(math.Log2(float64(value+1))) / 8))
	encoded := make([]byte, 0, width+1)
	encoded = append(encoded, prefix|0x08|byte(width))
	for remaining := width - 1; remaining >= 0; remaining-- {
		encoded = append(encoded, byte(value>>(uint(remaining*8))))
	}
	return encoded
}
|
||||
// wnbits packs the two child pointers value0/value1, each bits wide,
// big-endian into data; bits must be a multiple of 4 between 8 and 32
// and data must hold at least bits/4 bytes, otherwise nothing is written.
func wnbits(bits, value0, value1 int, data []byte) {
	if bits < 8 || bits > 32 || bits%4 != 0 || len(data) < bits/4 {
		return
	}
	switch bits {
	case 8:
		data[0] = byte(value0)
		data[1] = byte(value1)
	case 12:
		data[0] = byte(value0 >> 4)
		data[1] = byte(value0<<4) | (byte(value1>>8) & 0x0f)
		data[2] = byte(value1)
	case 16:
		binary.BigEndian.PutUint16(data[0:], uint16(value0))
		binary.BigEndian.PutUint16(data[2:], uint16(value1))
	case 20:
		data[0] = byte(value0 >> 12)
		data[1] = byte(value0 >> 4)
		data[2] = byte(value0<<4) | (byte(value1>>16) & 0x0f)
		data[3] = byte(value1 >> 8)
		data[4] = byte(value1)
	case 24:
		data[0], data[1], data[2] = byte(value0>>16), byte(value0>>8), byte(value0)
		data[3], data[4], data[5] = byte(value1>>16), byte(value1>>8), byte(value1)
	case 28:
		data[0], data[1], data[2] = byte(value0>>20), byte(value0>>12), byte(value0>>4)
		data[3] = byte(value0<<4) | (byte(value1>>24) & 0x0f)
		data[4], data[5], data[6] = byte(value1>>16), byte(value1>>8), byte(value1)
	case 32:
		binary.BigEndian.PutUint32(data[0:], uint32(value0))
		binary.BigEndian.PutUint32(data[4:], uint32(value1))
	}
}
|
||||
// Save serializes the in-memory trie into the compact PFDB binary layout
// (header + DESC/STRS/NUMS/PAIR/CLUS/MAPS/NODE sections), optionally
// writes it to path ("" = no write, "-" = stdout), and returns the raw
// bytes. Dictionaries are laid out in decreasing fame order so hot
// entries get the shortest variable-length indices. The in-memory build
// structures are reset in the process, so a PrefixDB is saved only once.
func (this *PrefixDB) Save(path, description string) (content []byte, err error) {
	// layout header + signature placeholder + description
	if cores > 1 {
		this.Lock()
	}
	// magic + version, 16-byte MD5 placeholder (bytes 8-23), then DESC + 20 bytes
	this.data = []byte{'P', 'F', 'D', 'B', 0, (VERSION >> 16) & 0xff, (VERSION >> 8) & 0xff, (VERSION & 0xff),
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 'D', 'E', 'S', 'C', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	if description == "" {
		// default description is the build timestamp
		description = time.Now().Format(`20060102150405`)
	}
	this.Description = description
	copy(this.data[28:47], []byte(description))

	// layout strings dictionary (ordered by fame)
	this.Strings[0] = 0
	for key, _ := range this.strings {
		this.Strings[0] += len(key)
	}
	// index width = minimum bytes needed to address any string offset
	this.Strings[3] = int(math.Ceil(math.Ceil(math.Log2(float64(this.Strings[0]+1))) / 8))
	this.data = append(this.data, []byte{'S', 'T', 'R', 'S', byte(this.Strings[3]), 0, 0, 0, 0, 0, 0, 0, 0}...)
	this.Strings[2] = len(this.data)
	this.Strings[1] = len(this.strings)
	this.Strings[0] += this.Strings[1] * this.Strings[3]
	flist := make([]*fame, this.Strings[1])
	for key, values := range this.strings {
		flist[values[1]] = &fame{values[0], key}
	}
	sort.Sort(byfame(flist))
	this.data = append(this.data, make([]byte, this.Strings[1]*this.Strings[3])...)
	offset := 0
	for index, item := range flist {
		// record the final (fame-sorted) index and emit offset + bytes
		this.strings[item.value.(string)][2] = index
		this.data = append(this.data, []byte(item.value.(string))...)
		wbytes(this.Strings[3], offset, this.data[this.Strings[2]+(index*this.Strings[3]):])
		offset += len(item.value.(string))
	}
	binary.BigEndian.PutUint32(this.data[this.Strings[2]-8:], uint32(this.Strings[0]))
	binary.BigEndian.PutUint32(this.data[this.Strings[2]-4:], uint32(this.Strings[1]))
	// reverse map: initial index -> entry, used when rewriting pairs below
	strings := make([]*fame, this.Strings[1])
	for key, values := range this.strings {
		strings[values[1]] = &fame{values[0], key}
	}

	// layout numbers dictionary (ordered by fame)
	this.data = append(this.data, []byte{'N', 'U', 'M', 'S', 0, 0, 0, 0, 0, 0, 0, 0}...)
	this.Numbers[2] = len(this.data)
	this.Numbers[1] = len(this.numbers)
	this.Numbers[0] = this.Numbers[1] * 8
	flist = make([]*fame, this.Numbers[1])
	for key, values := range this.numbers {
		flist[values[1]] = &fame{values[0], key}
	}
	sort.Sort(byfame(flist))
	this.data = append(this.data, make([]byte, this.Numbers[1]*8)...)
	for index, item := range flist {
		this.numbers[item.value.(float64)][2] = index
		binary.BigEndian.PutUint64(this.data[this.Numbers[2]+(index*8):], math.Float64bits(item.value.(float64)))
	}
	binary.BigEndian.PutUint32(this.data[this.Numbers[2]-8:], uint32(this.Numbers[0]))
	binary.BigEndian.PutUint32(this.data[this.Numbers[2]-4:], uint32(this.Numbers[1]))
	numbers := make([]*fame, this.Numbers[1])
	for key, values := range this.numbers {
		numbers[values[1]] = &fame{values[0], key}
	}

	// layout pairs dictionary (ordered by fame); pairs referenced only
	// once are dropped from the dictionary and inlined at use sites
	this.data = append(this.data, []byte{'P', 'A', 'I', 'R', 0, 0, 0, 0, 0, 0, 0, 0}...)
	this.Pairs[2] = len(this.data)
	flist = make([]*fame, len(this.pairs))
	for key, values := range this.pairs {
		flist[values[1]] = &fame{values[0], key}
	}
	sort.Sort(byfame(flist))
	for index, item := range flist {
		if item.fame > 1 {
			this.pairs[item.value.(uint64)][2] = index
		} else {
			delete(this.pairs, item.value.(uint64))
		}
	}
	this.Pairs[1] = len(this.pairs)
	this.Pairs[0] = this.Pairs[1] * 8
	this.data = append(this.data, make([]byte, this.Pairs[0])...)
	for index, item := range flist {
		// flist is fame-sorted, so the first fame<=1 entry ends the dictionary
		if item.fame <= 1 {
			break
		}
		// rewrite the pair with final (fame-sorted) dictionary indices
		pair := 0x1000000000000000 | (uint64(this.strings[strings[(item.value.(uint64)>>32)&0x0fffffff].value.(string)][2]) << 32)
		switch (item.value.(uint64) & 0xf0000000) >> 28 {
		case 1:
			pair |= 0x10000000 | uint64(this.strings[strings[item.value.(uint64)&0x0fffffff].value.(string)][2])
		case 2:
			pair |= 0x20000000 | uint64(this.numbers[numbers[item.value.(uint64)&0x0fffffff].value.(float64)][2])
		default:
			pair |= item.value.(uint64) & 0xf0000000
		}
		binary.BigEndian.PutUint64(this.data[this.Pairs[2]+(index*8):], pair)
	}
	binary.BigEndian.PutUint32(this.data[this.Pairs[2]-8:], uint32(this.Pairs[0]))
	binary.BigEndian.PutUint32(this.data[this.Pairs[2]-4:], uint32(this.Pairs[1]))

	// layout clusters dictionary (ordered by fame, and reduced for strings, numbers and pairs)
	this.Clusters[0] = 0
	for _, cluster := range this.clusters {
		for _, pair := range cluster.pairs {
			if _, ok := this.pairs[pair]; ok {
				// pair survived deduplication: emit a section-6 reference
				cluster.data = append(cluster.data, wpbits(0x60, this.pairs[pair][2])...)
			} else {
				// singleton pair: inline key + value references
				cluster.data = append(cluster.data, wpbits(0x10, this.strings[strings[(pair>>32)&0x0fffffff].value.(string)][2])...)
				switch (pair & 0xf0000000) >> 28 {
				case 1:
					cluster.data = append(cluster.data, wpbits(0x10, this.strings[strings[pair&0x0fffffff].value.(string)][2])...)
				case 2:
					cluster.data = append(cluster.data, wpbits(0x20, this.numbers[numbers[pair&0x0fffffff].value.(float64)][2])...)
				default:
					cluster.data = append(cluster.data, byte((pair&0xf0000000)>>24))
				}
			}
		}
		this.Clusters[0] += len(cluster.data)
	}
	this.Clusters[3] = int(math.Ceil(math.Ceil(math.Log2(float64(this.Clusters[0]+1))) / 8))
	this.data = append(this.data, []byte{'C', 'L', 'U', 'S', byte(this.Clusters[3]), 0, 0, 0, 0, 0, 0, 0, 0}...)
	this.Clusters[2] = len(this.data)
	this.Clusters[1] = len(this.clusters)
	this.Clusters[0] += this.Clusters[1] * this.Clusters[3]
	flist = make([]*fame, this.Clusters[1])
	for key, cluster := range this.clusters {
		flist[cluster.values[1]] = &fame{cluster.values[0], key}
	}
	sort.Sort(byfame(flist))
	this.data = append(this.data, make([]byte, this.Clusters[1]*this.Clusters[3])...)
	offset = 0
	for index, item := range flist {
		this.clusters[item.value.([16]byte)].values[2] = index
		this.data = append(this.data, this.clusters[item.value.([16]byte)].data...)
		wbytes(this.Clusters[3], offset, this.data[this.Clusters[2]+(index*this.Clusters[3]):])
		offset += len(this.clusters[item.value.([16]byte)].data)
	}
	binary.BigEndian.PutUint32(this.data[this.Clusters[2]-8:], uint32(this.Clusters[0]))
	binary.BigEndian.PutUint32(this.data[this.Clusters[2]-4:], uint32(this.Clusters[1]))
	clusters := make([]*fame, this.Clusters[1])
	for key, cluster := range this.clusters {
		clusters[cluster.values[1]] = &fame{cluster.values[0], key}
	}

	// layout maps dictionary (reduced for strings, numbers, pairs and clusters);
	// depth-first walk #1 (explored[0]/[1]) emits each leaf's payload and
	// assigns node ids to the internal nodes
	this.Nodes[1] = 1
	this.data = append(this.data, []byte{'M', 'A', 'P', 'S', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80}...)
	pnode := &this.tree
	this.Maps[2] = len(this.data) - 2
	this.Maps[1] = 1
	this.Maps[0] = 2
	for {
		if pnode.down[0] != nil && !pnode.explored[0] {
			pnode.explored[0] = true
			pnode = pnode.down[0]
		} else if pnode.down[1] != nil && !pnode.explored[1] {
			pnode.explored[1] = true
			pnode = pnode.down[1]
		} else if pnode.up != nil {
			pnode = pnode.up
		}
		if pnode.up == nil {
			break
		}
		if pnode.down[0] == nil && pnode.down[1] == nil {
			if len(pnode.data) == 0 {
				// empty leaf: points at the shared 0x80 terminator at offset 1
				pnode.offset = 1
			} else {
				data := []byte{}
				for index := 0; index < len(pnode.data); index++ {
					// the 0x80 bit marks the last entry of a node payload
					last := byte(0x00)
					if index == len(pnode.data)-1 {
						last = 0x80
					}
					if ((pnode.data[index]>>32)&0xf0000000)>>28 == 7 {
						data = append(data, wpbits(last|0x70, this.clusters[clusters[(pnode.data[index]>>32)&0x0fffffff].value.([16]byte)].values[2])...)
					} else {
						if _, ok := this.pairs[pnode.data[index]]; ok {
							data = append(data, wpbits(last|0x60, this.pairs[pnode.data[index]][2])...)
						} else {
							data = append(data, wpbits(0x10, this.strings[strings[(pnode.data[index]>>32)&0x0fffffff].value.(string)][2])...)
							switch (pnode.data[index] & 0xf0000000) >> 28 {
							case 1:
								data = append(data, wpbits(last|0x10, this.strings[strings[pnode.data[index]&0x0fffffff].value.(string)][2])...)
							case 2:
								data = append(data, wpbits(last|0x20, this.numbers[numbers[pnode.data[index]&0x0fffffff].value.(float64)][2])...)
							default:
								data = append(data, last|byte((pnode.data[index]&0xf0000000)>>24))
							}
						}
					}
				}
				this.data = append(this.data, data...)
				pnode.offset = this.Maps[0]
				this.Maps[0] += len(data)
				this.Maps[1]++
			}
		} else if pnode.id == 0 {
			pnode.id = this.Nodes[1]
			this.Nodes[1]++
		}
	}
	binary.BigEndian.PutUint32(this.data[this.Maps[2]-8:], uint32(this.Maps[0]))
	binary.BigEndian.PutUint32(this.data[this.Maps[2]-4:], uint32(this.Maps[1]))

	// layout nodes tree; child pointers below Nodes[1] address other
	// nodes, values >= Nodes[1] address map payload offsets
	this.Nodes[3] = int(math.Ceil(math.Ceil(math.Log2(float64(this.Nodes[1]+this.Maps[0]+1)))/4) * 4)
	this.data = append(this.data, []byte{'N', 'O', 'D', 'E', byte(this.Nodes[3]), 0, 0, 0, 0, 0, 0, 0, 0}...)
	this.Nodes[2] = len(this.data)
	this.Nodes[0] = this.Nodes[1] * ((2 * this.Nodes[3]) / 8)
	this.data = append(this.data, make([]byte, this.Nodes[0])...)
	pnode = &this.tree
	next := [2]int{}
	// depth-first walk #2 (explored[2]/[3]) emits the packed node records
	for {
		if (pnode == &this.tree || pnode.id != 0) && !pnode.emitted {
			pnode.emitted = true
			for index := 0; index <= 1; index++ {
				next[index] = this.Nodes[1]
				if pnode.down[index] != nil {
					if pnode.down[index].id != 0 {
						next[index] = pnode.down[index].id
					} else {
						next[index] += pnode.down[index].offset
					}
				}
			}
			wnbits(this.Nodes[3], next[0], next[1], this.data[this.Nodes[2]+(pnode.id*((2*this.Nodes[3])/8)):])
		}
		if pnode.down[0] != nil && !pnode.explored[2] {
			pnode.explored[2] = true
			pnode = pnode.down[0]
		} else if pnode.down[1] != nil && !pnode.explored[3] {
			pnode.explored[3] = true
			pnode = pnode.down[1]
		} else if pnode.up != nil {
			pnode = pnode.up
		}
		if pnode.up == nil {
			break
		}
	}
	binary.BigEndian.PutUint32(this.data[this.Nodes[2]-8:], uint32(this.Nodes[0]))
	binary.BigEndian.PutUint32(this.data[this.Nodes[2]-4:], uint32(this.Nodes[1]))

	// finalize header: drop the build structures and sign everything
	// after the checksum field with MD5
	this.tree, this.strings, this.numbers, this.pairs, this.clusters = node{}, map[string]*[3]int{}, map[float64]*[3]int{}, map[uint64]*[3]int{}, map[[16]byte]*cluster{}
	hash := md5.Sum(this.data[24:])
	copy(this.data[8:], hash[:])
	this.Total = len(this.data)

	// save database
	if path != "" {
		if path == "-" {
			_, err = os.Stdout.Write(this.data)
		} else {
			err = ioutil.WriteFile(path, this.data, 0644)
		}
	}

	if cores > 1 {
		this.Unlock()
	}
	return this.data, err
}
|
||||
|
||||
// Load reads a serialized database from path, verifies its preamble,
// major-version compatibility and MD5 checksum, then walks the section
// chain (DESC, STRS, NUMS, PAIR, CLUS, MAPS, NODE) recording each
// section's size/count/offset descriptors. Any break in the chain leaves
// a descriptor offset at 0, which is reported as an invalid structure.
func (this *PrefixDB) Load(path string) error {
	if data, err := ioutil.ReadFile(path); err != nil {
		return err
	} else {
		if len(data) < 8 || string(data[0:4]) != "PFDB" {
			return errors.New(`invalid preamble`)
		}
		// only the major version byte gates compatibility
		if version := (uint32(data[5]) << 16) + (uint32(data[6]) << 8) + uint32(data[7]); (version & 0xff0000) > (VERSION & 0xff0000) {
			return errors.New(fmt.Sprintf(`library major version %d is incompatible with database major version %d`, (VERSION&0xff0000)>>16, (version&0xff0000)>>16))
		} else {
			// the MD5 at bytes 8-24 signs everything after the header
			if len(data) < 24 || fmt.Sprintf("%x", md5.Sum(data[24:])) != fmt.Sprintf("%x", data[8:24]) {
				return errors.New(`database checksum is invalid`)
			}
			if cores > 1 {
				this.Lock()
			}
			this.data = data
			this.Total = len(data)
			this.Version = version
			offset := 24
			// each nested level validates one section tag then jumps past
			// its payload to the next section
			if this.Total >= offset+4 && string(data[offset:offset+4]) == "DESC" {
				offset += 4
				if this.Total >= offset+20 {
					// description is NUL-padded to 20 bytes
					index := 0
					if index = bytes.Index(data[offset:offset+20], []byte{0}); index < 0 {
						index = 20
					}
					this.Description = fmt.Sprintf("%s", data[offset:offset+index])
					offset += 20
					if this.Total >= offset+4 && string(data[offset:offset+4]) == "STRS" {
						offset += 4
						if this.Total >= offset+9 {
							this.Strings[3] = int(data[offset])
							this.Strings[2] = offset + 9
							this.Strings[1] = int(binary.BigEndian.Uint32(this.data[offset+5:]))
							this.Strings[0] = int(binary.BigEndian.Uint32(this.data[offset+1:]))
							offset += 9 + this.Strings[0]
							if this.Total >= offset+4 && string(data[offset:offset+4]) == "NUMS" {
								offset += 4
								if this.Total >= offset+8 {
									this.Numbers[2] = offset + 8
									this.Numbers[1] = int(binary.BigEndian.Uint32(this.data[offset+4:]))
									this.Numbers[0] = int(binary.BigEndian.Uint32(this.data[offset:]))
									offset += 8 + this.Numbers[0]
									if this.Total >= offset+4 && string(data[offset:offset+4]) == "PAIR" {
										offset += 4
										if this.Total >= offset+8 {
											this.Pairs[2] = offset + 8
											this.Pairs[1] = int(binary.BigEndian.Uint32(this.data[offset+4:]))
											this.Pairs[0] = int(binary.BigEndian.Uint32(this.data[offset:]))
											offset += 8 + this.Pairs[0]
											if this.Total >= offset+4 && string(data[offset:offset+4]) == "CLUS" {
												offset += 4
												this.Clusters[3] = int(data[offset])
												this.Clusters[2] = offset + 9
												this.Clusters[1] = int(binary.BigEndian.Uint32(this.data[offset+5:]))
												this.Clusters[0] = int(binary.BigEndian.Uint32(this.data[offset+1:]))
												offset += 9 + this.Clusters[0]
												if this.Total >= offset+4 && string(data[offset:offset+4]) == "MAPS" {
													offset += 4
													if this.Total >= offset+8 {
														this.Maps[2] = offset + 8
														this.Maps[1] = int(binary.BigEndian.Uint32(this.data[offset+4:]))
														this.Maps[0] = int(binary.BigEndian.Uint32(this.data[offset:]))
														offset += 8 + this.Maps[0]
														if this.Total >= offset+9 && string(data[offset:offset+4]) == "NODE" {
															offset += 4
															this.Nodes[3] = int(data[offset])
															this.Nodes[2] = offset + 9
															this.Nodes[1] = int(binary.BigEndian.Uint32(this.data[offset+5:]))
															this.Nodes[0] = int(binary.BigEndian.Uint32(this.data[offset+1:]))
															// the nodes section must end exactly at EOF
															if offset+9+this.Nodes[0] != this.Total {
																this.Nodes[2] = 0
															}
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
			if cores > 1 {
				this.Unlock()
			}
			if this.Strings[2] == 0 || this.Numbers[2] == 0 || this.Pairs[2] == 0 || this.Clusters[2] == 0 || this.Maps[2] == 0 || this.Nodes[2] == 0 {
				return errors.New(`database structure is invalid`)
			}
		}
	}
	return nil
}
|
||||
// rpbits decodes one variable-length reference produced by wpbits:
// section is the 3-bit tag from bits 4-6 of the header, index the
// decoded value (inline in the low 3 bits, or big-endian over the
// following bytes when the 0x08 flag is set), size the total encoded
// length in bytes, and last whether the 0x80 end-of-payload bit is set
// (a zero section also terminates).
func rpbits(data []byte) (section, index, size int, last bool) {
	header := data[0]
	section = int(header&0x70) >> 4
	last = header&0x80 != 0 || section == 0
	switch section {
	case 1, 2, 6, 7:
		if header&0x08 == 0 {
			index = int(header & 0x07)
		} else {
			size = int(header & 0x07)
			for extra := 1; extra <= size; extra++ {
				index = (index << 8) | int(data[extra])
			}
		}
	}
	return section, index, size + 1, last
}
|
||||
// rnbits extracts child pointer #down (0 or 1) of node #index from the
// packed nodes section, where each pointer is bits wide (a multiple of 4
// between 8 and 32); on invalid arguments or a too-short buffer the node
// index itself is returned unchanged.
func rnbits(bits, index, down int, data []byte) int {
	if bits < 8 || bits > 32 || bits%4 != 0 || (down != 0 && down != 1) || len(data) < (index+1)*(bits/4) {
		return index
	}
	offset := index * (bits / 4)
	switch bits {
	case 8:
		return int(data[offset+down])
	case 12:
		if down == 0 {
			return (int(data[offset]) << 4) | ((int(data[offset+1]) >> 4) & 0x0f)
		}
		return ((int(data[offset+1]) & 0x0f) << 8) | int(data[offset+2])
	case 16:
		return int(binary.BigEndian.Uint16(data[offset+(down*2):]))
	case 20:
		if down == 0 {
			return (int(data[offset]) << 12) | (int(data[offset+1]) << 4) | ((int(data[offset+2]) >> 4) & 0x0f)
		}
		return ((int(data[offset+2]) & 0x0f) << 16) | (int(data[offset+3]) << 8) | int(data[offset+4])
	case 24:
		if down == 0 {
			return (int(data[offset]) << 16) | (int(data[offset+1]) << 8) | int(data[offset+2])
		}
		return (int(data[offset+3]) << 16) | (int(data[offset+4]) << 8) | int(data[offset+5])
	case 28:
		if down == 0 {
			return (int(data[offset]) << 20) | (int(data[offset+1]) << 12) | (int(data[offset+2]) << 4) | ((int(data[offset+3]) >> 4) & 0x0f)
		}
		return ((int(data[offset+3]) & 0x0f) << 24) | (int(data[offset+4]) << 16) | (int(data[offset+5]) << 8) | int(data[offset+6])
	case 32:
		return int(binary.BigEndian.Uint32(data[offset+(down*4):]))
	}
	return index
}
|
||||
// rbytes reads a big-endian unsigned integer of width bytes from the
// start of data (the inverse of wbytes).
func rbytes(width int, data []byte) (value int) {
	for position := width - 1; position >= 0; position-- {
		value |= int(data[position]) << (uint(width-1-position) * 8)
	}
	return value
}
|
||||
// rstring returns the index-th entry of the string table, or "" when index
// is out of range. Per the usage below, this.Strings holds: [0] total
// section size, [1] entry count, [2] section offset within this.data,
// [3] byte width of each offset slot.
// NOTE(review): layout description inferred from the reads below — confirm
// against the writer side of the database format.
func (this *PrefixDB) rstring(index int) string {
	count, offset, width := this.Strings[1], this.Strings[2], this.Strings[3]
	if index >= count {
		return ""
	}
	// start/end are byte positions inside the blob that follows the
	// count*width offset slots; the last entry extends to the section end.
	start, end := rbytes(width, this.data[offset+(index*width):]), 0
	if index < count-1 {
		end = rbytes(width, this.data[offset+(index+1)*width:])
	} else {
		end = this.Strings[0] - (count * width)
	}
	return string(this.data[offset+(count*width)+start : offset+(count*width)+end])
}
|
||||
func (this *PrefixDB) rnumber(index int) float64 {
|
||||
if index >= this.Numbers[1] {
|
||||
return 0.0
|
||||
}
|
||||
return math.Float64frombits(binary.BigEndian.Uint64(this.data[this.Numbers[2]+(index*8):]))
|
||||
}
|
||||
func (this *PrefixDB) rpair(index int, pairs map[string]interface{}) {
|
||||
if index < this.Pairs[1] {
|
||||
pair := binary.BigEndian.Uint64(this.data[this.Pairs[2]+(index*8):])
|
||||
if key := this.rstring(int((pair >> 32) & 0x0fffffff)); key != "" {
|
||||
switch (pair & 0xf0000000) >> 28 {
|
||||
case 1:
|
||||
pairs[key] = this.rstring(int(pair & 0x0fffffff))
|
||||
case 2:
|
||||
pairs[key] = this.rnumber(int(pair & 0x0fffffff))
|
||||
case 3:
|
||||
pairs[key] = true
|
||||
case 4:
|
||||
pairs[key] = false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// rcluster decodes the index-th pre-built cluster of key/value pairs and
// merges it into pairs. Clusters are stored like the strings table (offset
// slots followed by a blob, described by this.Clusters[0..3]); the selected
// byte range is then walked as a sequence of rpbits-encoded items which
// alternate between keys (section-1 strings) and values (sections 1-5),
// with section 6 delegating to a shared pair via rpair.
func (this *PrefixDB) rcluster(index int, pairs map[string]interface{}) {
	count, offset, width := this.Clusters[1], this.Clusters[2], this.Clusters[3]
	if index < count {
		// resolve the [start, end) byte range of this cluster inside the
		// blob that follows the count*width offset slots
		start, end := rbytes(width, this.data[offset+(index*width):]), 0
		if index < count-1 {
			end = rbytes(width, this.data[offset+(index+1)*width:])
		} else {
			end = this.Clusters[0] - (count * width)
		}
		start += offset + (count * width)
		end += offset + (count * width)
		key := "" // pending key awaiting its value
		for start < end {
			section, index, size, _ := rpbits(this.data[start:])
			switch section {
			case 1: // string: first occurrence is a key, the next its value
				if key != "" {
					pairs[key] = this.rstring(index)
					key = ""
				} else {
					key = this.rstring(index)
				}
			case 2: // number value
				if key != "" {
					pairs[key] = this.rnumber(index)
					key = ""
				}
			case 3: // boolean true value
				if key != "" {
					pairs[key] = true
					key = ""
				}
			case 4: // boolean false value
				if key != "" {
					pairs[key] = false
					key = ""
				}
			case 5: // null value
				if key != "" {
					pairs[key] = nil
					key = ""
				}
			case 6: // shared key/value pair
				this.rpair(index, pairs)
			}
			start += size
		}
	}
}
|
||||
// Lookup walks the prefix tree for the given IP address and merges every
// key/value found at the matching node into input (allocating a fresh map
// when input is nil and a match exists). It returns the (possibly updated)
// map, and an error when the database is not loaded or the address is nil.
func (this *PrefixDB) Lookup(address net.IP, input map[string]interface{}) (output map[string]interface{}, err error) {
	output = input
	// refuse to run against an uninitialized database (any zero section
	// offset means the file was not fully loaded)
	if this.data == nil || this.Total == 0 || this.Version == 0 || this.Strings[2] == 0 || this.Numbers[2] == 0 ||
		this.Pairs[2] == 0 || this.Clusters[2] == 0 || this.Maps[2] == 0 || this.Nodes[2] == 0 || address == nil {
		err = errors.New("record not found")
	} else {
		address = address.To16() // walk every address as a 128-bit value
		offset := 0
		if cores > 1 { // locking is skipped on single-core hosts
			this.RLock()
		}
		for bit := 0; bit < 128; bit++ {
			// follow the child matching the current address bit (MSB first)
			down := 0
			if (address[bit/8] & (1 << (7 - (byte(bit) % 8)))) != 0 {
				down = 1
			}
			offset = rnbits(this.Nodes[3], offset, down, this.data[this.Nodes[2]:])
			if offset == this.Nodes[1] || offset == 0 {
				break // dead end: no record along this path
			}
			if output == nil {
				output = map[string]interface{}{}
			}
			if offset > this.Nodes[1] {
				// values past the node count point into the maps section:
				// decode the rpbits-encoded key/value stream found there
				offset -= this.Nodes[1]
				if offset < this.Maps[0] {
					offset += this.Maps[2]
					key := "" // pending key awaiting its value
					for offset < this.Maps[2]+this.Maps[0] {
						section, index, size, last := rpbits(this.data[offset:])
						switch section {
						case 1: // string: key first, then value
							if key != "" {
								output[key] = this.rstring(index)
								key = ""
							} else {
								key = this.rstring(index)
							}
						case 2: // number value
							if key != "" {
								output[key] = this.rnumber(index)
								key = ""
							}
						case 3: // boolean true value
							if key != "" {
								output[key] = true
								key = ""
							}
						case 4: // boolean false value
							if key != "" {
								output[key] = false
								key = ""
							}
						case 5: // null value
							if key != "" {
								output[key] = nil
								key = ""
							}
						case 6: // shared key/value pair
							this.rpair(index, output)
						case 7: // whole cluster of pairs
							this.rcluster(index, output)
						}
						if last {
							break
						}
						offset += size
					}
				}
				break
			}
		}
		if cores > 1 {
			this.RUnlock()
		}
	}
	return output, err
}
|
|
@ -3,38 +3,25 @@ package rcache
|
|||
import (
|
||||
"crypto/md5"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
cores int
|
||||
cache map[[16]byte]*regexp.Regexp = map[[16]byte]*regexp.Regexp{}
|
||||
lock sync.RWMutex
|
||||
)
|
||||
|
||||
func Get(expression string) *regexp.Regexp {
|
||||
if cores == 0 {
|
||||
cores = runtime.NumCPU()
|
||||
}
|
||||
key := md5.Sum([]byte(expression))
|
||||
if cores > 1 {
|
||||
lock.RLock()
|
||||
}
|
||||
if cache[key] != nil {
|
||||
if cores > 1 {
|
||||
defer lock.RUnlock()
|
||||
}
|
||||
return cache[key].Copy()
|
||||
}
|
||||
if cores > 1 {
|
||||
lock.RUnlock()
|
||||
}
|
||||
if regex, err := regexp.Compile(expression); err == nil {
|
||||
if cores > 1 {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
}
|
||||
cache[key] = regex
|
||||
return cache[key].Copy()
|
||||
}
|
||||
|
|
|
@ -1,38 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/pyke369/golang-support/rpack"
|
||||
)
|
||||
|
||||
// main parses the command-line flags and delegates to rpack.Pack to generate
// a Go source file embedding all resources found under the rootdir argument.
func main() {
	var (
		options flag.FlagSet
		usemain bool
	)

	options = flag.FlagSet{Usage: func() {
		fmt.Fprintf(os.Stderr, "usage: %s [options] rootdir\n\noptions are:\n\n", filepath.Base(os.Args[0]))
		options.PrintDefaults()
	},
	}
	options.String("output", "static.go", "the generated output file path")
	options.String("pkgname", "main", "the package name to use in the generated output")
	options.String("funcname", "resources", "the function name to use in the generated output")
	options.Bool("main", false, "whether to generate a main func or not")
	// abort on flag-parsing errors
	if err := options.Parse(os.Args[1:]); err != nil {
		os.Exit(1)
	}
	// the root directory is a mandatory positional argument
	if options.NArg() == 0 {
		options.Usage()
		os.Exit(1)
	}
	if options.Lookup("main").Value.String() == "true" {
		usemain = true
	}
	rpack.Pack(options.Arg(0), options.Lookup("output").Value.String(), options.Lookup("pkgname").Value.String(), options.Lookup("funcname").Value.String(), usemain)
}
|
|
@ -1,155 +0,0 @@
|
|||
package rpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"mime"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// RPACK describes a single packed resource as embedded in the generated
// source file.
type RPACK struct {
	Compressed bool   // true when Content holds gzip-compressed data
	Modified   int64  // source file modification time (Unix seconds)
	Mime       string // MIME type emitted in the Content-Type header
	Content    string // base64-encoded (possibly compressed) payload
	raw        []byte // decoded Content, populated lazily on first request
}
|
||||
|
||||
// guzpool recycles gzip readers across requests, avoiding a fresh
// decompression-state allocation for every compressed resource served.
var guzpool = sync.Pool{
	New: func() interface{} {
		return &gzip.Reader{}
	}}
|
||||
|
||||
// Pack walks the root directory tree and generates a Go source file (output)
// embedding every regular file as a base64-encoded RPACK entry, gzip
// compressed when that is smaller. The generated file declares the resource
// map in package pkgname, a funcname() http.Handler accessor and, when main
// is true, a runnable main() serving the resources on :8000.
func Pack(root, output, pkgname, funcname string, main bool) {
	root = strings.TrimSuffix(root, "/")
	if root == "" || output == "" {
		return
	}
	// a generated main() only makes sense in package main
	if pkgname == "" || main {
		pkgname = "main"
	}
	if funcname == "" {
		funcname = "resources"
	}
	// capitalize so the accessor is exported from the generated package
	funcname = strings.ToUpper(funcname[:1]) + funcname[1:]
	entries := map[string]*RPACK{}
	compressor, _ := gzip.NewWriterLevel(nil, gzip.BestCompression)
	count := 0
	size := int64(0)
	start := time.Now()
	filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		rpath := strings.TrimPrefix(path, root+"/")
		if info.Mode()&os.ModeType == 0 { // regular files only
			// skip dotfiles and anything below a hidden directory
			for _, part := range strings.Split(rpath, "/") {
				if len(part) > 0 && part[0] == '.' {
					return nil
				}
			}
			pack := &RPACK{Modified: info.ModTime().Unix(), Mime: "text/plain"}
			if mime := mime.TypeByExtension(filepath.Ext(rpath)); mime != "" {
				pack.Mime = mime
			}
			content, _ := ioutil.ReadFile(path)
			compressed := bytes.Buffer{}
			compressor.Reset(&compressed)
			compressor.Write(content)
			compressor.Close()
			// keep whichever of the raw / compressed forms is smaller
			if compressed.Len() < len(content) {
				pack.Content = base64.StdEncoding.EncodeToString(compressed.Bytes())
				pack.Compressed = true
			} else {
				pack.Content = base64.StdEncoding.EncodeToString(content)
			}
			entries[rpath] = pack
			fmt.Fprintf(os.Stderr, "\r%-120.120s ", rpath)
			count++
			size += info.Size()
		}
		return nil
	})
	fmt.Fprintf(os.Stderr, "\r%-120.120s\rpacked %d file(s) %d byte(s) in %v\n", "", count, size, time.Now().Sub(start))
	if handle, err := os.OpenFile(output, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644); err == nil {
		// derive a pseudo-unique map identifier so several generated files
		// can coexist in the same package
		random := make([]byte, 64)
		rand.Seed(time.Now().UnixNano() + int64(os.Getpid()))
		rand.Read(random)
		uid := fmt.Sprintf("rpack%8.8x", md5.Sum(random))
		// emit the package header and the opening of the resource map
		fmt.Fprintf(handle,
			`package %s

import (
	"net/http"
	"github.com/pyke369/golang-support/rpack"
)

var %s map[string]*rpack.RPACK = map[string]*rpack.RPACK {
`,
			pkgname, uid)
		// one literal entry per packed file
		for path, entry := range entries {
			fmt.Fprintf(handle,
				`	"%s": &rpack.RPACK{Compressed:%t, Modified:%d, Mime:"%s", Content:"%s"},
`, path, entry.Compressed, entry.Modified, entry.Mime, entry.Content)
		}
		// close the map and emit the exported accessor
		fmt.Fprintf(handle,
			`}

func %s() http.Handler {
	return rpack.Serve(%s)
}
`, funcname, uid)
		if main {
			// optional standalone server entry point
			fmt.Fprintf(handle,
				`
func main() {
	http.Handle("/resources/", http.StripPrefix("/resources/", %s()))
	http.ListenAndServe(":8000", nil)
}
`, funcname)
		}
		handle.Close()
	}
}
|
||||
|
||||
func Serve(pack map[string]*RPACK) http.Handler {
|
||||
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
|
||||
var err error
|
||||
if pack == nil || pack[request.URL.Path] == nil {
|
||||
response.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
if pack[request.URL.Path].raw == nil {
|
||||
if pack[request.URL.Path].raw, err = base64.StdEncoding.DecodeString(pack[request.URL.Path].Content); err != nil {
|
||||
response.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
}
|
||||
resource := pack[request.URL.Path]
|
||||
response.Header().Set("Content-Type", resource.Mime)
|
||||
if strings.Index(request.Header.Get("Accept-Encoding"), "gzip") >= 0 && request.Header.Get("Range") == "" && resource.Compressed {
|
||||
response.Header().Set("Content-Encoding", "gzip")
|
||||
response.Header().Set("Content-Length", fmt.Sprintf("%d", len(resource.raw)))
|
||||
http.ServeContent(response, request, request.URL.Path, time.Unix(resource.Modified, 0), bytes.NewReader(resource.raw))
|
||||
} else {
|
||||
if resource.Compressed {
|
||||
decompressor := guzpool.Get().(*gzip.Reader)
|
||||
decompressor.Reset(bytes.NewReader(resource.raw))
|
||||
if raw, err := ioutil.ReadAll(decompressor); err == nil {
|
||||
http.ServeContent(response, request, request.URL.Path, time.Unix(resource.Modified, 0), bytes.NewReader(raw))
|
||||
}
|
||||
decompressor.Close()
|
||||
guzpool.Put(decompressor)
|
||||
} else {
|
||||
http.ServeContent(response, request, request.URL.Path, time.Unix(resource.Modified, 0), bytes.NewReader(resource.raw))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
|
@ -14,11 +14,12 @@ import (
|
|||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pyke369/golang-support/rcache"
|
||||
)
|
||||
|
||||
type UConfig struct {
|
||||
|
@ -36,7 +37,6 @@ type replacer struct {
|
|||
}
|
||||
|
||||
var (
|
||||
cores int
|
||||
escaped string
|
||||
unescaper *regexp.Regexp
|
||||
requoter *regexp.Regexp
|
||||
|
@ -49,27 +49,27 @@ var (
|
|||
|
||||
func init() {
|
||||
escaped = "{}[],#/*;:= " // match characters within quotes to escape
|
||||
unescaper = regexp.MustCompile("@\\d+@") // match escaped characters (to reverse previous escaping)
|
||||
expander = regexp.MustCompile("{{([<|@&!\\-\\+])\\s*([^{}]*?)\\s*}}") // match external content macros
|
||||
sizer = regexp.MustCompile("^(\\d+(?:\\.\\d*)?)\\s*([KMGTP]?)(B?)$") // match size value
|
||||
duration1 = regexp.MustCompile("(\\d+)(Y|MO|D|H|MN|S|MS|US)?") // match duration value form1 (free)
|
||||
duration2 = regexp.MustCompile("^(?:(\\d+):)?(\\d{2}):(\\d{2})(?:\\.(\\d{1,3}))?$") // match duration value form2 (timecode)
|
||||
replacers[0] = replacer{regexp.MustCompile("(?m)^(.*?)(?:#|//).*?$"), "$1", false} // remove # and // commented portions
|
||||
replacers[1] = replacer{regexp.MustCompile("/\\*[^\\*]*\\*/"), "", true} // remove /* */ commented portions
|
||||
replacers[2] = replacer{regexp.MustCompile("(?m)^\\s+"), "", false} // trim leading spaces
|
||||
replacers[3] = replacer{regexp.MustCompile("(?m)\\s+$"), "", false} // trim trailing spaces
|
||||
unescaper = regexp.MustCompile(`@\d+@`) // match escaped characters (to reverse previous escaping)
|
||||
expander = regexp.MustCompile(`{{([<|@&!\-\+])\s*([^{}]*?)\s*}}`) // match external content macros
|
||||
sizer = regexp.MustCompile(`^(\d+(?:\.\d*)?)\s*([KMGTP]?)(B?)$`) // match size value
|
||||
duration1 = regexp.MustCompile(`(\d+)(Y|MO|D|H|MN|S|MS|US)?`) // match duration value form1 (free)
|
||||
duration2 = regexp.MustCompile(`^(?:(\d+):)?(\d{2}):(\d{2})(?:\.(\d{1,3}))?$`) // match duration value form2 (timecode)
|
||||
replacers[0] = replacer{regexp.MustCompile("(?m)^(.*?)(?:#|//).*?$"), `$1`, false} // remove # and // commented portions
|
||||
replacers[1] = replacer{regexp.MustCompile(`/\*[^\*]*\*/`), ``, true} // remove /* */ commented portions
|
||||
replacers[2] = replacer{regexp.MustCompile(`(?m)^\s+`), ``, false} // trim leading spaces
|
||||
replacers[3] = replacer{regexp.MustCompile(`(?m)\s+$`), ``, false} // trim trailing spaces
|
||||
replacers[4] = replacer{regexp.MustCompile("(?s)(^|[\r\n]+)\\[([^\\]\r\n]+?)\\](.+?)((?:[\r\n]+\\[)|$)"), "$1$2\n{$3\n}$4", true} // convert INI sections into JSON objects
|
||||
replacers[5] = replacer{regexp.MustCompile("(?m)^(\\S+)\\s+([^{}\\[\\],;:=]+);$"), "$1 = $2;", false} // add missing key-value separators
|
||||
replacers[6] = replacer{regexp.MustCompile("(?m);$"), ",", false} // replace ; line terminators by ,
|
||||
replacers[7] = replacer{regexp.MustCompile("(\\S+?)\\s*[:=]"), "$1:", false} // replace = key-value separators by :
|
||||
replacers[8] = replacer{regexp.MustCompile("([}\\]])(\\s*)([^,}\\]\\s])"), "$1,$2$3", false} // add missing objects/arrays , separators
|
||||
replacers[9] = replacer{regexp.MustCompile("(?m)(^[^:]+:.+?[^,])$"), "$1,", false} // add missing values trailing , seperators
|
||||
replacers[10] = replacer{regexp.MustCompile("(^|[,{\\[]+\\s*)([^:{\\[]+?)(\\s*[{\\[])"), "$1$2:$3", true} // add missing key-(object/array-)value separator
|
||||
replacers[11] = replacer{regexp.MustCompile("(?m)^([^\":{}\\[\\]]+)"), "\"$1\"", false} // add missing quotes around keys
|
||||
replacers[12] = replacer{regexp.MustCompile("([:,\\[\\s]+)([^\",\\[\\]{}\n\r]+?)(\\s*[,\\]}])"), "$1\"$2\"$3", false} // add missing quotes around values
|
||||
replacers[5] = replacer{regexp.MustCompile(`(?m)^(\S+)\s+([^{}\[\],;:=]+);$`), "$1 = $2;", false} // add missing key-value separators
|
||||
replacers[6] = replacer{regexp.MustCompile(`(?m);$`), `,`, false} // replace ; line terminators by ,
|
||||
replacers[7] = replacer{regexp.MustCompile(`(\S+?)\s*[:=]`), `$1:`, false} // replace = key-value separators by :
|
||||
replacers[8] = replacer{regexp.MustCompile(`([}\]])(\s*)([^,}\]\s])`), `$1,$2$3`, false} // add missing objects/arrays , separators
|
||||
replacers[9] = replacer{regexp.MustCompile("(?m)(^[^:]+:.+?[^,])$"), `$1,`, false} // add missing values trailing , seperators
|
||||
replacers[10] = replacer{regexp.MustCompile(`(^|[,{\[]+\s*)([^:{\[]+?)(\s*[{\[])`), `$1$2:$3`, true} // add missing key-(object/array-)value separator
|
||||
replacers[11] = replacer{regexp.MustCompile(`(?m)^([^":{}\[\]]+)`), `"$1"`, false} // add missing quotes around keys
|
||||
replacers[12] = replacer{regexp.MustCompile("([:,\\[\\s]+)([^\",\\[\\]{}\n\r]+?)(\\s*[,\\]}])"), `$1"$2"$3`, false} // add missing quotes around values
|
||||
replacers[13] = replacer{regexp.MustCompile("\"[\r\n]"), "\",\n", false} // add still issing objects/arrays , separators
|
||||
replacers[14] = replacer{regexp.MustCompile("\"\\s*(.+?)\\s*\""), "\"$1\"", false} // trim leading and trailing spaces in quoted strings
|
||||
replacers[15] = replacer{regexp.MustCompile(",+(\\s*[}\\]])"), "$1", false} // remove objets/arrays last element extra ,
|
||||
replacers[14] = replacer{regexp.MustCompile(`"\s*(.+?)\s*"`), `"$1"`, false} // trim leading and trailing spaces in quoted strings
|
||||
replacers[15] = replacer{regexp.MustCompile(`,+(\s*[}\]])`), `$1`, false} // remove objets/arrays last element extra ,
|
||||
}
|
||||
|
||||
func escape(input string) string {
|
||||
|
@ -77,7 +77,7 @@ func escape(input string) string {
|
|||
|
||||
instring := false
|
||||
for index := 0; index < len(input); index++ {
|
||||
if input[index:index+1] == "\"" && (index == 0 || input[index-1:index] != "\\") {
|
||||
if input[index:index+1] == `"` && (index == 0 || input[index-1:index] != `\`) {
|
||||
instring = !instring
|
||||
}
|
||||
if instring == true {
|
||||
|
@ -135,9 +135,6 @@ func reduce(input interface{}) {
|
|||
}
|
||||
|
||||
func New(input string, inline ...bool) (*UConfig, error) {
|
||||
if cores == 0 {
|
||||
cores = runtime.NumCPU()
|
||||
}
|
||||
config := &UConfig{
|
||||
config: nil,
|
||||
}
|
||||
|
@ -145,11 +142,7 @@ func New(input string, inline ...bool) (*UConfig, error) {
|
|||
}
|
||||
|
||||
func (this *UConfig) Load(input string, inline ...bool) error {
|
||||
if cores > 1 {
|
||||
this.Lock()
|
||||
defer this.Unlock()
|
||||
}
|
||||
|
||||
this.cache = map[string]interface{}{}
|
||||
base, _ := os.Getwd()
|
||||
content := fmt.Sprintf("/*base:%s*/\n", base)
|
||||
|
@ -272,30 +265,29 @@ func (this *UConfig) Load(input string, inline ...bool) error {
|
|||
if syntax, ok := err.(*json.SyntaxError); ok && syntax.Offset < int64(len(content)) {
|
||||
if start := strings.LastIndex(content[:syntax.Offset], "\n") + 1; start >= 0 {
|
||||
line := strings.Count(content[:start], "\n") + 1
|
||||
this.Unlock()
|
||||
return errors.New(fmt.Sprintf("%s at line %d near %s", syntax, line, content[start:syntax.Offset]))
|
||||
}
|
||||
}
|
||||
this.Unlock()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
reduce(this.config)
|
||||
this.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *UConfig) Loaded() bool {
|
||||
if cores > 1 {
|
||||
this.RLock()
|
||||
defer this.RUnlock()
|
||||
}
|
||||
return !(this.config == nil)
|
||||
}
|
||||
|
||||
func (this *UConfig) Hash() string {
|
||||
if cores > 1 {
|
||||
this.RLock()
|
||||
defer this.RUnlock()
|
||||
}
|
||||
return this.hash
|
||||
}
|
||||
|
||||
|
@ -312,54 +304,41 @@ func (this *UConfig) GetPaths(path string) []string {
|
|||
paths []string = []string{}
|
||||
)
|
||||
|
||||
if cores > 1 {
|
||||
this.RLock()
|
||||
defer this.RUnlock()
|
||||
}
|
||||
prefix := ""
|
||||
if current == nil || path == "" {
|
||||
this.RUnlock()
|
||||
return paths
|
||||
}
|
||||
if cores > 1 {
|
||||
this.cacheLock.RLock()
|
||||
}
|
||||
if this.cache[path] != nil {
|
||||
if paths, ok := this.cache[path].([]string); ok {
|
||||
if cores > 1 {
|
||||
this.cacheLock.RUnlock()
|
||||
}
|
||||
this.RUnlock()
|
||||
return paths
|
||||
}
|
||||
}
|
||||
if cores > 1 {
|
||||
this.cacheLock.RUnlock()
|
||||
}
|
||||
if path != "" {
|
||||
prefix = "."
|
||||
for _, part := range strings.Split(path, ".") {
|
||||
kind := reflect.TypeOf(current).Kind()
|
||||
index, err := strconv.Atoi(part)
|
||||
if (kind == reflect.Slice && (err != nil || index < 0 || index >= len(current.([]interface{})))) || (kind != reflect.Slice && kind != reflect.Map) {
|
||||
if cores > 1 {
|
||||
this.cacheLock.Lock()
|
||||
}
|
||||
this.cache[path] = paths
|
||||
if cores > 1 {
|
||||
this.cacheLock.Unlock()
|
||||
}
|
||||
this.RUnlock()
|
||||
return paths
|
||||
}
|
||||
if kind == reflect.Slice {
|
||||
current = current.([]interface{})[index]
|
||||
} else {
|
||||
if current = current.(map[string]interface{})[strings.TrimSpace(part)]; current == nil {
|
||||
if cores > 1 {
|
||||
this.cacheLock.Lock()
|
||||
}
|
||||
this.cache[path] = paths
|
||||
if cores > 1 {
|
||||
this.cacheLock.Unlock()
|
||||
}
|
||||
this.RUnlock()
|
||||
return paths
|
||||
}
|
||||
}
|
||||
|
@ -375,91 +354,68 @@ func (this *UConfig) GetPaths(path string) []string {
|
|||
paths = append(paths, fmt.Sprintf("%s%s%s", path, prefix, key))
|
||||
}
|
||||
}
|
||||
if cores > 1 {
|
||||
this.cacheLock.Lock()
|
||||
}
|
||||
this.cache[path] = paths
|
||||
if cores > 1 {
|
||||
this.cacheLock.Unlock()
|
||||
}
|
||||
this.RUnlock()
|
||||
return paths
|
||||
}
|
||||
|
||||
func (this *UConfig) value(path string) (string, error) {
|
||||
var current interface{} = this.config
|
||||
|
||||
if cores > 1 {
|
||||
this.RLock()
|
||||
defer this.RUnlock()
|
||||
}
|
||||
if current == nil || path == "" {
|
||||
this.RUnlock()
|
||||
return "", fmt.Errorf("invalid parameter")
|
||||
}
|
||||
if cores > 1 {
|
||||
this.cacheLock.RLock()
|
||||
}
|
||||
if this.cache[path] != nil {
|
||||
if current, ok := this.cache[path].(bool); ok && !current {
|
||||
if cores > 1 {
|
||||
this.cacheLock.RUnlock()
|
||||
}
|
||||
this.RUnlock()
|
||||
return "", fmt.Errorf("invalid path")
|
||||
}
|
||||
if current, ok := this.cache[path].(string); ok {
|
||||
if cores > 1 {
|
||||
this.cacheLock.RUnlock()
|
||||
}
|
||||
this.RUnlock()
|
||||
return current, nil
|
||||
}
|
||||
}
|
||||
if cores > 1 {
|
||||
this.cacheLock.RUnlock()
|
||||
}
|
||||
for _, part := range strings.Split(path, ".") {
|
||||
kind := reflect.TypeOf(current).Kind()
|
||||
index, err := strconv.Atoi(part)
|
||||
if (kind == reflect.Slice && (err != nil || index < 0 || index >= len(current.([]interface{})))) || (kind != reflect.Slice && kind != reflect.Map) {
|
||||
if cores > 1 {
|
||||
this.cacheLock.Lock()
|
||||
}
|
||||
this.cache[path] = false
|
||||
if cores > 1 {
|
||||
this.cacheLock.Unlock()
|
||||
}
|
||||
this.RUnlock()
|
||||
return "", fmt.Errorf("invalid path")
|
||||
}
|
||||
if kind == reflect.Slice {
|
||||
current = current.([]interface{})[index]
|
||||
} else {
|
||||
if current = current.(map[string]interface{})[strings.TrimSpace(part)]; current == nil {
|
||||
if cores > 1 {
|
||||
this.cacheLock.Lock()
|
||||
}
|
||||
this.cache[path] = false
|
||||
if cores > 1 {
|
||||
this.cacheLock.Unlock()
|
||||
}
|
||||
this.RUnlock()
|
||||
return "", fmt.Errorf("invalid path")
|
||||
}
|
||||
}
|
||||
}
|
||||
if reflect.TypeOf(current).Kind() == reflect.String {
|
||||
if cores > 1 {
|
||||
this.cacheLock.Lock()
|
||||
}
|
||||
this.cache[path] = current.(string)
|
||||
if cores > 1 {
|
||||
this.cacheLock.Unlock()
|
||||
}
|
||||
this.RUnlock()
|
||||
return current.(string), nil
|
||||
}
|
||||
if cores > 1 {
|
||||
this.cacheLock.Lock()
|
||||
}
|
||||
this.cache[path] = false
|
||||
if cores > 1 {
|
||||
this.cacheLock.Unlock()
|
||||
}
|
||||
this.RUnlock()
|
||||
return "", fmt.Errorf("invalid path")
|
||||
}
|
||||
|
||||
|
@ -486,7 +442,7 @@ func (this *UConfig) GetStringMatchCaptures(path string, fallback, match string)
|
|||
return []string{fallback}
|
||||
}
|
||||
if match != "" {
|
||||
if matcher, err := regexp.Compile(match); err == nil {
|
||||
if matcher := rcache.Get(match); matcher != nil {
|
||||
if matches := matcher.FindStringSubmatch(value); matches != nil {
|
||||
return matches
|
||||
} else {
|
||||
|
|
|
@ -1,545 +0,0 @@
|
|||
package ulog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/syslog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// TIME_* values select how (and whether) timestamps are rendered by the
// logging outputs; chosen via the "time" option parsed in Load.
const (
	TIME_NONE        int = iota // no timestamp
	TIME_DATETIME               // human-readable date and time
	TIME_MSDATETIME             // date and time with sub-second precision
	TIME_TIMESTAMP              // numeric Unix timestamp
	TIME_MSTIMESTAMP            // Unix timestamp with sub-second precision
)
|
||||
|
||||
var (
	// cores caches runtime.NumCPU() (set by New); lock acquisition is
	// skipped throughout the package when running on a single core
	cores int
	// facilities maps configuration facility names to syslog priorities
	facilities = map[string]syslog.Priority{
		"user":   syslog.LOG_USER,
		"daemon": syslog.LOG_DAEMON,
		"local0": syslog.LOG_LOCAL0,
		"local1": syslog.LOG_LOCAL1,
		"local2": syslog.LOG_LOCAL2,
		"local3": syslog.LOG_LOCAL3,
		"local4": syslog.LOG_LOCAL4,
		"local5": syslog.LOG_LOCAL5,
		"local6": syslog.LOG_LOCAL6,
		"local7": syslog.LOG_LOCAL7,
	}
	// severities maps configuration level names to syslog priorities
	severities = map[string]syslog.Priority{
		"error":   syslog.LOG_ERR,
		"warning": syslog.LOG_WARNING,
		"info":    syslog.LOG_INFO,
		"debug":   syslog.LOG_DEBUG,
	}
	// severityLabels are the per-severity prefixes emitted on log lines
	severityLabels = map[syslog.Priority]string{
		syslog.LOG_ERR:     "ERRO ",
		syslog.LOG_WARNING: "WARN ",
		syslog.LOG_INFO:    "INFO ",
		syslog.LOG_DEBUG:   "DBUG ",
	}
	// severityColors are the ANSI escape sequences used on color-capable
	// consoles (red, yellow, cyan, green respectively)
	severityColors = map[syslog.Priority]string{
		syslog.LOG_ERR:     "\x1b[31m",
		syslog.LOG_WARNING: "\x1b[33m",
		syslog.LOG_INFO:    "\x1b[36m",
		syslog.LOG_DEBUG:   "\x1b[32m",
	}
)
|
||||
|
||||
// FileOutput tracks one opened log file and when it was last used.
type FileOutput struct {
	handle *os.File
	last   time.Time
}

// ULog is a multi-target logger writing to any combination of rotated
// files, the console and (possibly remote) syslog, as configured by
// New/Load.
type ULog struct {
	file, console, syslog bool                   // which outputs are enabled
	fileOutputs           map[string]*FileOutput // opened files, keyed by path
	filePath              string                 // file output path template
	fileTime              int                    // TIME_* mode for the file output
	fileLast              time.Time
	fileSeverity          bool // prefix file lines with the severity label
	consoleHandle         io.Writer
	consoleTime           int  // TIME_* mode for the console output
	consoleSeverity       bool // prefix console lines with the severity label
	consoleColors         bool // colorize console output (disabled on non-ttys)
	syslogHandle          *syslog.Writer
	syslogRemote          string // "host:port" of a remote syslog, "" for local
	syslogName            string // program name reported to syslog
	syslogFacility        syslog.Priority
	optionUTC             bool            // log timestamps in UTC instead of local time
	level                 syslog.Priority // maximum severity actually emitted
	sync.Mutex
}
|
||||
|
||||
func New(target string) *ULog {
|
||||
if cores == 0 {
|
||||
cores = runtime.NumCPU()
|
||||
}
|
||||
log := &ULog{
|
||||
fileOutputs: map[string]*FileOutput{},
|
||||
syslogHandle: nil,
|
||||
}
|
||||
return log.Load(target)
|
||||
}
|
||||
|
||||
// Load (re)configures the logger from a target specification string made of
// "file(...)", "console(...)", "syslog(...)" and "option(...)" groups, each
// carrying key:value (or key=value) options. Previously opened outputs are
// closed first, all settings are reset to their defaults, and the receiver
// is returned for chaining.
func (this *ULog) Load(target string) *ULog {
	this.Close()
	if cores > 1 { // locking is skipped on single-core hosts
		this.Lock()
		defer this.Unlock()
	}

	// reset every setting to its default before parsing the specification
	this.file = false
	this.filePath = ""
	this.fileTime = TIME_DATETIME
	this.fileSeverity = true
	this.console = false
	this.consoleTime = TIME_DATETIME
	this.consoleSeverity = true
	this.consoleColors = true
	this.consoleHandle = os.Stderr
	this.syslog = false
	this.syslogRemote = ""
	this.syslogName = filepath.Base(os.Args[0])
	this.syslogFacility = syslog.LOG_DAEMON
	this.optionUTC = false
	this.level = syslog.LOG_INFO
	// each match is one "<name>(<options>)" group
	for _, target := range regexp.MustCompile("(file|console|syslog|option)\\s*\\(([^\\)]*)\\)").FindAllStringSubmatch(target, -1) {
		switch strings.ToLower(target[1]) {
		case "file":
			this.file = true
			for _, option := range regexp.MustCompile("([^:=,\\s]+)\\s*[:=]\\s*([^,\\s]+)").FindAllStringSubmatch(target[2], -1) {
				switch strings.ToLower(option[1]) {
				case "path":
					this.filePath = option[2]
				case "time":
					option[2] = strings.ToLower(option[2])
					switch {
					case option[2] == "datetime":
						this.fileTime = TIME_DATETIME
					case option[2] == "msdatetime":
						this.fileTime = TIME_MSDATETIME
					case option[2] == "stamp" || option[2] == "timestamp":
						this.fileTime = TIME_TIMESTAMP
					case option[2] == "msstamp" || option[2] == "mstimestamp":
						this.fileTime = TIME_MSTIMESTAMP
					case option[2] != "1" && option[2] != "true" && option[2] != "on" && option[2] != "yes":
						this.fileTime = TIME_NONE
					}
				case "severity":
					option[2] = strings.ToLower(option[2])
					if option[2] != "1" && option[2] != "true" && option[2] != "on" && option[2] != "yes" {
						this.fileSeverity = false
					}
				}
			}
			// a file output without a path is meaningless: disable it
			if this.filePath == "" {
				this.file = false
			}
		case "console":
			this.console = true
			for _, option := range regexp.MustCompile("([^:=,\\s]+)\\s*[:=]\\s*([^,\\s]+)").FindAllStringSubmatch(target[2], -1) {
				option[2] = strings.ToLower(option[2])
				switch strings.ToLower(option[1]) {
				case "output":
					// stderr by default, stdout on request
					if option[2] == "stdout" {
						this.consoleHandle = os.Stdout
					}
				case "time":
					switch {
					case option[2] == "datetime":
						this.consoleTime = TIME_DATETIME
					case option[2] == "msdatetime":
						this.consoleTime = TIME_MSDATETIME
					case option[2] == "stamp" || option[2] == "timestamp":
						this.consoleTime = TIME_TIMESTAMP
					case option[2] == "msstamp" || option[2] == "mstimestamp":
						this.consoleTime = TIME_MSTIMESTAMP
					case option[2] != "1" && option[2] != "true" && option[2] != "on" && option[2] != "yes":
						this.consoleTime = TIME_NONE
					}
				case "severity":
					if option[2] != "1" && option[2] != "true" && option[2] != "on" && option[2] != "yes" {
						this.consoleSeverity = false
					}
				case "colors":
					if option[2] != "1" && option[2] != "true" && option[2] != "on" && option[2] != "yes" {
						this.consoleColors = false
					}
				}
			}
		case "syslog":
			this.syslog = true
			for _, option := range regexp.MustCompile("([^:=,\\s]+)\\s*[:=]\\s*([^,\\s]+)").FindAllStringSubmatch(target[2], -1) {
				switch strings.ToLower(option[1]) {
				case "remote":
					this.syslogRemote = option[2]
					// default to the standard syslog port when omitted
					if !regexp.MustCompile(":\\d+$").MatchString(this.syslogRemote) {
						this.syslogRemote += ":514"
					}
				case "name":
					this.syslogName = option[2]
				case "facility":
					this.syslogFacility = facilities[strings.ToLower(option[2])]
				}
			}
		case "option":
			for _, option := range regexp.MustCompile("([^:=,\\s]+)\\s*[:=]\\s*([^,\\s]+)").FindAllStringSubmatch(target[2], -1) {
				option[2] = strings.ToLower(option[2])
				switch strings.ToLower(option[1]) {
				case "utc":
					if option[2] == "1" || option[2] == "true" || option[2] == "on" || option[2] == "yes" {
						this.optionUTC = true
					}
				case "level":
					this.level = severities[strings.ToLower(option[2])]
				}
			}
		}
	}

	// disable colors when the console handle is not a terminal (the TCGETS
	// ioctl fails on non-ttys); NOTE(review): Linux-specific syscall
	var info syscall.Termios
	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, this.consoleHandle.(*os.File).Fd(), syscall.TCGETS, uintptr(unsafe.Pointer(&info)), 0, 0, 0); err != 0 {
		this.consoleColors = false
	}
	return this
}
|
||||
|
||||
func (this *ULog) Close() {
|
||||
if cores > 1 {
|
||||
this.Lock()
|
||||
defer this.Unlock()
|
||||
}
|
||||
if this.syslogHandle != nil {
|
||||
this.syslogHandle.Close()
|
||||
this.syslogHandle = nil
|
||||
}
|
||||
for path, output := range this.fileOutputs {
|
||||
if output.handle != nil {
|
||||
output.handle.Close()
|
||||
}
|
||||
delete(this.fileOutputs, path)
|
||||
}
|
||||
}
|
||||
|
||||
func (this *ULog) SetLevel(level string) {
|
||||
level = strings.ToLower(level)
|
||||
switch level {
|
||||
case "error":
|
||||
this.level = syslog.LOG_ERR
|
||||
case "warning":
|
||||
this.level = syslog.LOG_WARNING
|
||||
case "info":
|
||||
this.level = syslog.LOG_INFO
|
||||
case "debug":
|
||||
this.level = syslog.LOG_DEBUG
|
||||
}
|
||||
}
|
||||
|
||||
func strftime(layout string, base time.Time) string {
|
||||
var output []string
|
||||
|
||||
length := len(layout)
|
||||
for index := 0; index < length; index++ {
|
||||
switch layout[index] {
|
||||
case '%':
|
||||
if index < length-1 {
|
||||
switch layout[index+1] {
|
||||
case 'a':
|
||||
output = append(output, base.Format("Mon"))
|
||||
case 'A':
|
||||
output = append(output, base.Format("Monday"))
|
||||
case 'b':
|
||||
output = append(output, base.Format("Jan"))
|
||||
case 'B':
|
||||
output = append(output, base.Format("January"))
|
||||
case 'c':
|
||||
output = append(output, base.Format("Mon Jan 2 15:04:05 2006"))
|
||||
case 'C':
|
||||
output = append(output, fmt.Sprintf("%02d", base.Year()/100))
|
||||
case 'd':
|
||||
output = append(output, fmt.Sprintf("%02d", base.Day()))
|
||||
case 'D':
|
||||
output = append(output, fmt.Sprintf("%02d/%02d/%02d", base.Month(), base.Day(), base.Year()%100))
|
||||
case 'e':
|
||||
output = append(output, fmt.Sprintf("%2d", base.Day()))
|
||||
case 'f':
|
||||
output = append(output, fmt.Sprintf("%06d", base.Nanosecond()/1000))
|
||||
case 'F':
|
||||
output = append(output, fmt.Sprintf("%04d-%02d-%02d", base.Year(), base.Month(), base.Day()))
|
||||
case 'g':
|
||||
year, _ := base.ISOWeek()
|
||||
output = append(output, fmt.Sprintf("%02d", year%100))
|
||||
case 'G':
|
||||
year, _ := base.ISOWeek()
|
||||
output = append(output, fmt.Sprintf("%04d", year))
|
||||
case 'h':
|
||||
output = append(output, base.Format("Jan"))
|
||||
case 'H':
|
||||
output = append(output, fmt.Sprintf("%02d", base.Hour()))
|
||||
case 'I':
|
||||
if base.Hour() == 0 || base.Hour() == 12 {
|
||||
output = append(output, "12")
|
||||
} else {
|
||||
output = append(output, fmt.Sprintf("%02d", base.Hour()%12))
|
||||
}
|
||||
case 'j':
|
||||
output = append(output, fmt.Sprintf("%03d", base.YearDay()))
|
||||
case 'k':
|
||||
output = append(output, fmt.Sprintf("%2d", base.Hour()))
|
||||
case 'l':
|
||||
if base.Hour() == 0 || base.Hour() == 12 {
|
||||
output = append(output, "12")
|
||||
} else {
|
||||
output = append(output, fmt.Sprintf("%2d", base.Hour()%12))
|
||||
}
|
||||
case 'm':
|
||||
output = append(output, fmt.Sprintf("%02d", base.Month()))
|
||||
case 'M':
|
||||
output = append(output, fmt.Sprintf("%02d", base.Minute()))
|
||||
case 'n':
|
||||
output = append(output, "\n")
|
||||
case 'p':
|
||||
if base.Hour() < 12 {
|
||||
output = append(output, "AM")
|
||||
} else {
|
||||
output = append(output, "PM")
|
||||
}
|
||||
case 'P':
|
||||
if base.Hour() < 12 {
|
||||
output = append(output, "am")
|
||||
} else {
|
||||
output = append(output, "pm")
|
||||
}
|
||||
case 'r':
|
||||
if base.Hour() == 0 || base.Hour() == 12 {
|
||||
output = append(output, "12")
|
||||
} else {
|
||||
output = append(output, fmt.Sprintf("%02d", base.Hour()%12))
|
||||
}
|
||||
output = append(output, fmt.Sprintf(":%02d:%02d", base.Minute(), base.Second()))
|
||||
if base.Hour() < 12 {
|
||||
output = append(output, " AM")
|
||||
} else {
|
||||
output = append(output, " PM")
|
||||
}
|
||||
case 'R':
|
||||
output = append(output, fmt.Sprintf("%02d:%02d", base.Hour(), base.Minute()))
|
||||
case 's':
|
||||
output = append(output, fmt.Sprintf("%d", base.Unix()))
|
||||
case 'S':
|
||||
output = append(output, fmt.Sprintf("%02d", base.Second()))
|
||||
case 't':
|
||||
output = append(output, "\t")
|
||||
case 'T':
|
||||
output = append(output, fmt.Sprintf("%02d:%02d:%02d", base.Hour(), base.Minute(), base.Second()))
|
||||
case 'u':
|
||||
day := base.Weekday()
|
||||
if day == 0 {
|
||||
day = 7
|
||||
}
|
||||
output = append(output, fmt.Sprintf("%d", day))
|
||||
case 'U':
|
||||
output = append(output, fmt.Sprintf("%d", (base.YearDay()+6-int(base.Weekday()))/7))
|
||||
case 'V':
|
||||
_, week := base.ISOWeek()
|
||||
output = append(output, fmt.Sprintf("%02d", week))
|
||||
case 'w':
|
||||
output = append(output, fmt.Sprintf("%d", base.Weekday()))
|
||||
case 'W':
|
||||
day := int(base.Weekday())
|
||||
if day == 0 {
|
||||
day = 6
|
||||
} else {
|
||||
day -= 1
|
||||
}
|
||||
output = append(output, fmt.Sprintf("%d", (base.YearDay()+6-day)/7))
|
||||
case 'x':
|
||||
output = append(output, fmt.Sprintf("%02d/%02d/%02d", base.Month(), base.Day(), base.Year()%100))
|
||||
case 'X':
|
||||
output = append(output, fmt.Sprintf("%02d:%02d:%02d", base.Hour(), base.Minute(), base.Second()))
|
||||
case 'y':
|
||||
output = append(output, fmt.Sprintf("%02d", base.Year()%100))
|
||||
case 'Y':
|
||||
output = append(output, fmt.Sprintf("%04d", base.Year()))
|
||||
case 'z':
|
||||
output = append(output, base.Format("-0700"))
|
||||
case 'Z':
|
||||
output = append(output, base.Format("MST"))
|
||||
case '%':
|
||||
output = append(output, "%")
|
||||
}
|
||||
index++
|
||||
}
|
||||
default:
|
||||
output = append(output, string(layout[index]))
|
||||
}
|
||||
}
|
||||
return strings.Join(output, "")
|
||||
}
|
||||
|
||||
// log is the single sink behind all public logging helpers: it normalizes
// the message once, then fans it out to every enabled target (syslog, file,
// console). xlayout is either a format string or a map (serialized to one
// JSON object); a carries the format arguments.
func (this *ULog) log(now time.Time, severity syslog.Priority, xlayout interface{}, a ...interface{}) {
	var err error
	// Drop messages above the configured level, or when no output is active.
	if this.level < severity || (!this.syslog && !this.file && !this.console) {
		return
	}
	layout := ""
	switch reflect.TypeOf(xlayout).Kind() {
	case reflect.Map:
		// Maps are encoded as compact JSON (HTML escaping disabled) and
		// logged through a plain "%s" layout.
		var buffer bytes.Buffer

		encoder := json.NewEncoder(&buffer)
		encoder.SetEscapeHTML(false)
		if err := encoder.Encode(xlayout); err == nil {
			layout = "%s"
			a = []interface{}{bytes.TrimSpace(buffer.Bytes())}
		}
	case reflect.String:
		layout = xlayout.(string)
	}
	layout = strings.TrimSpace(layout)
	if this.syslog {
		// Lazily (re)connect to syslog; the handle is double-checked under
		// the lock so concurrent callers dial at most once.
		if this.syslogHandle == nil {
			if cores > 1 {
				this.Lock()
			}
			if this.syslogHandle == nil {
				protocol := ""
				if this.syslogRemote != "" {
					protocol = "udp"
				}
				if this.syslogHandle, err = syslog.Dial(protocol, this.syslogRemote, this.syslogFacility, this.syslogName); err != nil {
					this.syslogHandle = nil
				}
			}
			if cores > 1 {
				this.Unlock()
			}
		}
		if this.syslogHandle != nil {
			switch severity {
			case syslog.LOG_ERR:
				this.syslogHandle.Err(fmt.Sprintf(layout, a...))
			case syslog.LOG_WARNING:
				this.syslogHandle.Warning(fmt.Sprintf(layout, a...))
			case syslog.LOG_INFO:
				this.syslogHandle.Info(fmt.Sprintf(layout, a...))
			case syslog.LOG_DEBUG:
				this.syslogHandle.Debug(fmt.Sprintf(layout, a...))
			}
		}
	}
	// File and console prefixes use local time unless UTC was requested.
	if this.optionUTC {
		now = now.UTC()
	} else {
		now = now.Local()
	}
	if this.file {
		// The output path is run through strftime, so it may contain
		// time-based directives (e.g. for rotation).
		path := strftime(this.filePath, now)
		if cores > 1 {
			this.Lock()
		}
		if this.fileOutputs[path] == nil {
			os.MkdirAll(filepath.Dir(path), 0755)
			if handle, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644); err == nil {
				this.fileOutputs[path] = &FileOutput{handle: handle}
			}
		}
		if this.fileOutputs[path] != nil && this.fileOutputs[path].handle != nil {
			// Optional timestamp prefix, in one of four formats.
			prefix := ""
			switch this.fileTime {
			case TIME_DATETIME:
				prefix = fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d ", now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute(), now.Second())
			case TIME_MSDATETIME:
				prefix = fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d.%03d ", now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute(), now.Second(), now.Nanosecond()/int(time.Millisecond))
			case TIME_TIMESTAMP:
				prefix = fmt.Sprintf("%d ", now.Unix())
			case TIME_MSTIMESTAMP:
				prefix = fmt.Sprintf("%d ", now.UnixNano()/int64(time.Millisecond))
			}
			if this.fileSeverity {
				prefix += severityLabels[severity]
			}
			this.fileOutputs[path].handle.WriteString(fmt.Sprintf(prefix+layout+"\n", a...))
			this.fileOutputs[path].last = now
		}
		// At most every 5s, close file handles idle for 5s or more.
		if now.Sub(this.fileLast) >= 5*time.Second {
			this.fileLast = now
			for path, output := range this.fileOutputs {
				if now.Sub(output.last) >= 5*time.Second {
					output.handle.Close()
					delete(this.fileOutputs, path)
				}
			}
		}
		if cores > 1 {
			this.Unlock()
		}
	}
	if this.console {
		// Same optional timestamp prefix as the file target.
		prefix := ""
		switch this.consoleTime {
		case TIME_DATETIME:
			prefix = fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d ", now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute(), now.Second())
		case TIME_MSDATETIME:
			prefix = fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d.%03d ", now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute(), now.Second(), now.Nanosecond()/int(time.Millisecond))
		case TIME_TIMESTAMP:
			prefix = fmt.Sprintf("%d ", now.Unix())
		case TIME_MSTIMESTAMP:
			prefix = fmt.Sprintf("%d ", now.UnixNano()/int64(time.Millisecond))
		}
		if this.consoleSeverity {
			if this.consoleColors {
				// wrap the label in its ANSI color + reset sequence
				prefix += fmt.Sprintf("%s%s\x1b[0m", severityColors[severity], severityLabels[severity])
			} else {
				prefix += severityLabels[severity]
			}
		}
		if cores > 1 {
			this.Lock()
		}
		fmt.Fprintf(this.consoleHandle, prefix+layout+"\n", a...)
		if cores > 1 {
			this.Unlock()
		}
	}
}
|
||||
|
||||
// Error logs a message at ERR severity, stamped with the current time.
func (this *ULog) Error(layout interface{}, a ...interface{}) {
	this.log(time.Now(), syslog.LOG_ERR, layout, a...)
}

// Warn logs a message at WARNING severity, stamped with the current time.
func (this *ULog) Warn(layout interface{}, a ...interface{}) {
	this.log(time.Now(), syslog.LOG_WARNING, layout, a...)
}

// Info logs a message at INFO severity, stamped with the current time.
func (this *ULog) Info(layout interface{}, a ...interface{}) {
	this.log(time.Now(), syslog.LOG_INFO, layout, a...)
}

// Debug logs a message at DEBUG severity, stamped with the current time.
func (this *ULog) Debug(layout interface{}, a ...interface{}) {
	this.log(time.Now(), syslog.LOG_DEBUG, layout, a...)
}
|
||||
|
||||
// ErrorTime logs a message at ERR severity using the caller-supplied timestamp.
func (this *ULog) ErrorTime(now time.Time, layout interface{}, a ...interface{}) {
	this.log(now, syslog.LOG_ERR, layout, a...)
}

// WarnTime logs a message at WARNING severity using the caller-supplied timestamp.
func (this *ULog) WarnTime(now time.Time, layout interface{}, a ...interface{}) {
	this.log(now, syslog.LOG_WARNING, layout, a...)
}

// InfoTime logs a message at INFO severity using the caller-supplied timestamp.
func (this *ULog) InfoTime(now time.Time, layout interface{}, a ...interface{}) {
	this.log(now, syslog.LOG_INFO, layout, a...)
}

// DebugTime logs a message at DEBUG severity using the caller-supplied timestamp.
func (this *ULog) DebugTime(now time.Time, layout interface{}, a ...interface{}) {
	this.log(now, syslog.LOG_DEBUG, layout, a...)
}
|
|
@ -1,30 +0,0 @@
|
|||
package uuid
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// initialized records whether the shared math/rand source has been seeded.
var initialized bool

// init seeds the global PRNG once per process, mixing the wall clock with
// the process id.
func init() {
	if !initialized {
		rand.Seed(time.Now().UnixNano() + int64(os.Getpid()))
		initialized = true
	}
}

// UUID returns a freshly generated random (version 4, RFC 4122 variant)
// UUID in its canonical 36-character textual form.
func UUID() string {
	raw := make([]byte, 16)

	// defensive re-seed in case init has not run yet
	if !initialized {
		rand.Seed(time.Now().UnixNano() + int64(os.Getpid()))
		initialized = true
	}
	rand.Read(raw)
	raw[6] = (raw[6] & 0x0f) | 0x40 // version 4
	raw[8] = (raw[8] & 0x3f) | 0x80 // RFC 4122 variant
	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", raw[0:4], raw[4:6], raw[6:8], raw[8:10], raw[10:16])
}
|
|
@ -1,244 +0,0 @@
|
|||
package whohas
|
||||
|
||||
import (
|
||||
"context"
|
||||
"mime"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BACKEND describes one candidate origin server probed with a HEAD request.
type BACKEND struct {
	Host    string            // host[:port] of the backend
	Secure  bool              // probe over https instead of http
	Path    string            // optional request-path override (defaults to the lookup path)
	Headers map[string]string // extra headers added to the probe request (also copied into the result)
	Penalty time.Duration     // delay before probing this backend when several compete
}

// CACHE memoizes lookup results by request path (and optionally by a
// caller-supplied key).
type CACHE struct {
	TTL   time.Duration // maximum lifetime of a positive entry (floored to 2s by Lookup)
	last  time.Time     // last time expired entries were purged
	items map[string]*LOOKUP
	sync.RWMutex
}

// LOOKUP is the outcome of probing one backend for a given path.
type LOOKUP struct {
	index    int       // position of the probed backend in the input slice
	deadline time.Time // cache expiry for this entry
	Protocol string    // "http" or "https"
	Host     string    // backend that answered 200 (empty when the probe failed)
	Headers  map[string]string
	Size     int64  // Content-Length reported by the backend
	Mime     string // Content-Type (possibly inferred from the path extension)
	Ranges   bool   // backend advertises Accept-Ranges
	Date     time.Time
	Modified time.Time
	Expires  time.Time
}

// cores caches runtime.NumCPU() (set lazily by Lookup); locking is skipped
// on single-core hosts.
var cores int
|
||||
|
||||
// Lookup probes the given backends in parallel with HEAD requests to find one
// that serves path, returning its metadata or nil when none answers 200
// within timeout. Positive and negative results are memoized in cache, keyed
// by path stripped of its query string, and optionally by the caller key ckey.
func Lookup(path string, backends []BACKEND, timeout time.Duration, cache *CACHE, ckey string) (lookup *LOOKUP) {
	if cores == 0 {
		cores = runtime.NumCPU()
	}
	// reject degenerate inputs (timeouts under 100ms included)
	if path == "" || backends == nil || len(backends) < 1 || timeout < 100*time.Millisecond {
		return
	}

	// cache key: path without the query string
	cpath := path
	if index := strings.Index(path, "?"); index >= 0 {
		cpath = path[:index]
	}
	cbackends := backends
	if cache != nil && cache.items != nil {
		now := time.Now()
		if cores > 1 {
			cache.RLock()
		}
		// fresh per-path entry: reuse it, and narrow the probe set to the
		// backend it recorded (if any)
		if cache.items[cpath] != nil && now.Sub(cache.items[cpath].deadline) < 0 {
			lookup = cache.items[cpath]
			if cache.items[cpath].Host != "" {
				cbackends = []BACKEND{}
				for _, backend := range backends {
					if backend.Host == cache.items[cpath].Host {
						cbackends = append(cbackends, backend)
						break
					}
				}
				if len(cbackends) < 1 {
					cbackends = backends
				}
			}
		}
		// otherwise, a fresh per-key entry ("k"+ckey) can narrow the probe
		// set to the backend last known to hold this caller's content
		if len(cbackends) == len(backends) && ckey != "" && cache.items["k"+ckey] != nil && now.Sub(cache.items["k"+ckey].deadline) < 0 {
			cbackends = []BACKEND{}
			for _, backend := range backends {
				if backend.Host == cache.items["k"+ckey].Host {
					cbackends = append(cbackends, backend)
					break
				}
			}
			if len(cbackends) < 1 {
				cbackends = backends
			}
		}
		if cores > 1 {
			cache.RUnlock()
		}
	}

	// cache miss: probe all candidate backends concurrently and keep the
	// first positive answer, cancelling the others
	if lookup == nil {
		inflight := len(cbackends)
		sink := make(chan LOOKUP, inflight+1)
		cancels := make([]context.CancelFunc, inflight)
		for index, backend := range cbackends {
			var ctx context.Context

			ctx, cancels[index] = context.WithCancel(context.Background())
			go func(index int, backend BACKEND, ctx context.Context) {
				lookup := LOOKUP{index: index}
				// honor the backend's start penalty (only meaningful when
				// several backends compete)
				if backend.Penalty != 0 && len(cbackends) > 1 {
					select {
					case <-time.After(backend.Penalty):
					case <-ctx.Done():
						sink <- lookup
						return
					}
				}
				lookup.Protocol = "http"
				if backend.Secure {
					lookup.Protocol = "https"
				}
				rpath := path
				if backend.Path != "" {
					rpath = backend.Path
				}
				if request, err := http.NewRequest(http.MethodHead, lookup.Protocol+"://"+backend.Host+rpath, nil); err == nil {
					request = request.WithContext(ctx)
					request.Header.Set("User-Agent", "whohas")
					if backend.Headers != nil {
						lookup.Headers = map[string]string{}
						for name, value := range backend.Headers {
							lookup.Headers[name] = value
							request.Header.Set(name, value)
						}
					}
					if response, err := http.DefaultClient.Do(request); err == nil {
						// only a plain 200 counts as a positive answer
						if response.StatusCode == 200 {
							lookup.Host = backend.Host
							lookup.Size, _ = strconv.ParseInt(response.Header.Get("Content-Length"), 10, 64)
							lookup.Mime = response.Header.Get("Content-Type")
							// fall back to extension-based MIME detection for
							// missing/generic content types
							if lookup.Mime == "" || lookup.Mime == "application/octet-stream" || lookup.Mime == "text/plain" {
								if extension := filepath.Ext(path); extension != "" {
									lookup.Mime = mime.TypeByExtension(extension)
								}
							}
							if response.Header.Get("Accept-Ranges") != "" {
								lookup.Ranges = true
							}
							if header := response.Header.Get("Date"); header != "" {
								lookup.Date, _ = http.ParseTime(header)
							} else {
								lookup.Date = time.Now()
							}
							if header := response.Header.Get("Last-Modified"); header != "" {
								lookup.Modified, _ = http.ParseTime(header)
							}
							if header := response.Header.Get("Expires"); header != "" {
								lookup.Expires, _ = http.ParseTime(header)
							} else {
								lookup.Expires = lookup.Date.Add(time.Hour)
							}
							// enforce a minimal 2s validity window
							if lookup.Expires.Sub(lookup.Date) < 2*time.Second {
								lookup.Expires = lookup.Date.Add(2 * time.Second)
							}
						}
						response.Body.Close()
					}
				}
				sink <- lookup
			}(index, backend, ctx)
		}

		// collect results: first positive answer wins and cancels the rest;
		// the timeout arm cancels everything still running
		for inflight > 0 {
			select {
			case result := <-sink:
				inflight--
				cancels[result.index] = nil
				if result.Host != "" {
					lookup = &result
					for index, cancel := range cancels {
						if cancels[index] != nil && index != result.index {
							cancel()
							cancels[index] = nil
						}
					}
				}
			case <-time.After(timeout):
				for index, cancel := range cancels {
					if cancels[index] != nil {
						cancel()
					}
				}
			}
		}
		close(sink)
	}

	// refresh the cache with the outcome (positive or negative)
	if cache != nil {
		now := time.Now()
		if cores > 1 {
			cache.Lock()
		}
		if cache.items == nil {
			cache.items = map[string]*LOOKUP{}
		}
		// at most every 5s, purge expired entries
		if now.Sub(cache.last) >= 5*time.Second {
			cache.last = now
			for key, item := range cache.items {
				if now.Sub(item.deadline) >= 0 {
					delete(cache.items, key)
				}
			}
		}
		if lookup == nil || lookup.Host == "" {
			// negative result: drop the per-key hint and record a short-lived
			// (5s) negative entry for the path
			if ckey != "" {
				delete(cache.items, "k"+ckey)
			}
			if cache.items[cpath] == nil {
				cache.items[cpath] = &LOOKUP{deadline: now.Add(5 * time.Second)}
			}
			lookup = nil
		} else {
			if cache.TTL < 2*time.Second {
				cache.TTL = 2 * time.Second
			}
			if ckey != "" {
				cache.items["k"+ckey] = &LOOKUP{Host: lookup.Host, deadline: now.Add(cache.TTL)}
			}
			if cache.items[cpath] == nil {
				// entry lifetime: clamped between 2s, the cache TTL and 10mn
				ttl := lookup.Expires.Sub(lookup.Date)
				if ttl < 2*time.Second {
					ttl = 2 * time.Second
				}
				if ttl > cache.TTL {
					ttl = cache.TTL
				}
				if ttl > 10*time.Minute {
					ttl = 10 * time.Minute
				}
				lookup.deadline = now.Add(ttl)
				cache.items[cpath] = lookup
			}
		}
		if cores > 1 {
			cache.Unlock()
		}
	}

	return
}
|
|
@ -0,0 +1,36 @@
|
|||
language: go
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.2.x
|
||||
env: GOOS=linux GOARCH=amd64
|
||||
- go: 1.2.x
|
||||
env: GOOS=linux GOARCH=386
|
||||
- go: 1.2.x
|
||||
env: GOOS=windows GOARCH=amd64
|
||||
- go: 1.2.x
|
||||
env: GOOS=windows GOARCH=386
|
||||
- go: 1.3.x
|
||||
- go: 1.4.x
|
||||
- go: 1.5.x
|
||||
- go: 1.6.x
|
||||
- go: 1.7.x
|
||||
- go: 1.8.x
|
||||
- go: 1.9.x
|
||||
- go: 1.10.x
|
||||
- go: 1.11.x
|
||||
env: GOOS=linux GOARCH=amd64
|
||||
- go: 1.11.x
|
||||
env: GOOS=linux GOARCH=386
|
||||
- go: 1.11.x
|
||||
env: GOOS=windows GOARCH=amd64
|
||||
- go: 1.11.x
|
||||
env: GOOS=windows GOARCH=386
|
||||
- go: tip
|
||||
go_import_path: gopkg.in/asn1-ber.v1
|
||||
install:
|
||||
- go list -f '{{range .Imports}}{{.}} {{end}}' ./... | xargs go get -v
|
||||
- go list -f '{{range .TestImports}}{{.}} {{end}}' ./... | xargs go get -v
|
||||
- go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover
|
||||
- go build -v ./...
|
||||
script:
|
||||
- go test -v -cover ./... || go test -v ./...
|
|
@ -0,0 +1,22 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
|
||||
Portions copyright (c) 2015-2016 go-asn1-ber Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,24 @@
|
|||
[![GoDoc](https://godoc.org/gopkg.in/asn1-ber.v1?status.svg)](https://godoc.org/gopkg.in/asn1-ber.v1) [![Build Status](https://travis-ci.org/go-asn1-ber/asn1-ber.svg)](https://travis-ci.org/go-asn1-ber/asn1-ber)
|
||||
|
||||
|
||||
ASN1 BER Encoding / Decoding Library for the GO programming language.
|
||||
---------------------------------------------------------------------
|
||||
|
||||
Required libraries:
|
||||
None
|
||||
|
||||
Working:
|
||||
Very basic encoding / decoding needed for LDAP protocol
|
||||
|
||||
Tests Implemented:
|
||||
A few
|
||||
|
||||
TODO:
|
||||
Fix all encoding / decoding to conform to ASN1 BER spec
|
||||
Implement Tests / Benchmarks
|
||||
|
||||
---
|
||||
|
||||
The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
|
||||
The design is licensed under the Creative Commons 3.0 Attributions license.
|
||||
Read this article for more details: http://blog.golang.org/gopher
|
|
@ -0,0 +1,512 @@
|
|||
package ber
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// MaxPacketLengthBytes specifies the maximum allowed packet size when calling ReadPacket or DecodePacket. Set to 0 for
|
||||
// no limit.
|
||||
var MaxPacketLengthBytes int64 = math.MaxInt32
|
||||
|
||||
// Packet is a single BER element: its identifier plus the decoded value,
// raw content bytes and any nested (constructed) child packets.
type Packet struct {
	Identifier
	Value       interface{}   // decoded Go value for universal primitive tags (nil otherwise)
	ByteValue   []byte        // raw content octets for universal-class packets
	Data        *bytes.Buffer // encoded content (children already serialized)
	Children    []*Packet
	Description string // optional label rendered by printPacket
}

// Identifier holds the class, constructed/primitive marker and tag number
// decoded from a BER identifier octet sequence.
type Identifier struct {
	ClassType Class
	TagType   Type
	Tag       Tag
}
|
||||
|
||||
type Tag uint64
|
||||
|
||||
const (
|
||||
TagEOC Tag = 0x00
|
||||
TagBoolean Tag = 0x01
|
||||
TagInteger Tag = 0x02
|
||||
TagBitString Tag = 0x03
|
||||
TagOctetString Tag = 0x04
|
||||
TagNULL Tag = 0x05
|
||||
TagObjectIdentifier Tag = 0x06
|
||||
TagObjectDescriptor Tag = 0x07
|
||||
TagExternal Tag = 0x08
|
||||
TagRealFloat Tag = 0x09
|
||||
TagEnumerated Tag = 0x0a
|
||||
TagEmbeddedPDV Tag = 0x0b
|
||||
TagUTF8String Tag = 0x0c
|
||||
TagRelativeOID Tag = 0x0d
|
||||
TagSequence Tag = 0x10
|
||||
TagSet Tag = 0x11
|
||||
TagNumericString Tag = 0x12
|
||||
TagPrintableString Tag = 0x13
|
||||
TagT61String Tag = 0x14
|
||||
TagVideotexString Tag = 0x15
|
||||
TagIA5String Tag = 0x16
|
||||
TagUTCTime Tag = 0x17
|
||||
TagGeneralizedTime Tag = 0x18
|
||||
TagGraphicString Tag = 0x19
|
||||
TagVisibleString Tag = 0x1a
|
||||
TagGeneralString Tag = 0x1b
|
||||
TagUniversalString Tag = 0x1c
|
||||
TagCharacterString Tag = 0x1d
|
||||
TagBMPString Tag = 0x1e
|
||||
TagBitmask Tag = 0x1f // xxx11111b
|
||||
|
||||
// HighTag indicates the start of a high-tag byte sequence
|
||||
HighTag Tag = 0x1f // xxx11111b
|
||||
// HighTagContinueBitmask indicates the high-tag byte sequence should continue
|
||||
HighTagContinueBitmask Tag = 0x80 // 10000000b
|
||||
// HighTagValueBitmask obtains the tag value from a high-tag byte sequence byte
|
||||
HighTagValueBitmask Tag = 0x7f // 01111111b
|
||||
)
|
||||
|
||||
const (
|
||||
// LengthLongFormBitmask is the mask to apply to the length byte to see if a long-form byte sequence is used
|
||||
LengthLongFormBitmask = 0x80
|
||||
// LengthValueBitmask is the mask to apply to the length byte to get the number of bytes in the long-form byte sequence
|
||||
LengthValueBitmask = 0x7f
|
||||
|
||||
// LengthIndefinite is returned from readLength to indicate an indefinite length
|
||||
LengthIndefinite = -1
|
||||
)
|
||||
|
||||
var tagMap = map[Tag]string{
|
||||
TagEOC: "EOC (End-of-Content)",
|
||||
TagBoolean: "Boolean",
|
||||
TagInteger: "Integer",
|
||||
TagBitString: "Bit String",
|
||||
TagOctetString: "Octet String",
|
||||
TagNULL: "NULL",
|
||||
TagObjectIdentifier: "Object Identifier",
|
||||
TagObjectDescriptor: "Object Descriptor",
|
||||
TagExternal: "External",
|
||||
TagRealFloat: "Real (float)",
|
||||
TagEnumerated: "Enumerated",
|
||||
TagEmbeddedPDV: "Embedded PDV",
|
||||
TagUTF8String: "UTF8 String",
|
||||
TagRelativeOID: "Relative-OID",
|
||||
TagSequence: "Sequence and Sequence of",
|
||||
TagSet: "Set and Set OF",
|
||||
TagNumericString: "Numeric String",
|
||||
TagPrintableString: "Printable String",
|
||||
TagT61String: "T61 String",
|
||||
TagVideotexString: "Videotex String",
|
||||
TagIA5String: "IA5 String",
|
||||
TagUTCTime: "UTC Time",
|
||||
TagGeneralizedTime: "Generalized Time",
|
||||
TagGraphicString: "Graphic String",
|
||||
TagVisibleString: "Visible String",
|
||||
TagGeneralString: "General String",
|
||||
TagUniversalString: "Universal String",
|
||||
TagCharacterString: "Character String",
|
||||
TagBMPString: "BMP String",
|
||||
}
|
||||
|
||||
type Class uint8
|
||||
|
||||
const (
|
||||
ClassUniversal Class = 0 // 00xxxxxxb
|
||||
ClassApplication Class = 64 // 01xxxxxxb
|
||||
ClassContext Class = 128 // 10xxxxxxb
|
||||
ClassPrivate Class = 192 // 11xxxxxxb
|
||||
ClassBitmask Class = 192 // 11xxxxxxb
|
||||
)
|
||||
|
||||
var ClassMap = map[Class]string{
|
||||
ClassUniversal: "Universal",
|
||||
ClassApplication: "Application",
|
||||
ClassContext: "Context",
|
||||
ClassPrivate: "Private",
|
||||
}
|
||||
|
||||
type Type uint8
|
||||
|
||||
const (
|
||||
TypePrimitive Type = 0 // xx0xxxxxb
|
||||
TypeConstructed Type = 32 // xx1xxxxxb
|
||||
TypeBitmask Type = 32 // xx1xxxxxb
|
||||
)
|
||||
|
||||
var TypeMap = map[Type]string{
|
||||
TypePrimitive: "Primitive",
|
||||
TypeConstructed: "Constructed",
|
||||
}
|
||||
|
||||
var Debug bool = false
|
||||
|
||||
func PrintBytes(out io.Writer, buf []byte, indent string) {
|
||||
data_lines := make([]string, (len(buf)/30)+1)
|
||||
num_lines := make([]string, (len(buf)/30)+1)
|
||||
|
||||
for i, b := range buf {
|
||||
data_lines[i/30] += fmt.Sprintf("%02x ", b)
|
||||
num_lines[i/30] += fmt.Sprintf("%02d ", (i+1)%100)
|
||||
}
|
||||
|
||||
for i := 0; i < len(data_lines); i++ {
|
||||
out.Write([]byte(indent + data_lines[i] + "\n"))
|
||||
out.Write([]byte(indent + num_lines[i] + "\n\n"))
|
||||
}
|
||||
}
|
||||
|
||||
// PrintPacket dumps a human-readable rendering of p (and its children) to
// standard output, without the raw-bytes hex dump.
func PrintPacket(p *Packet) {
	printPacket(os.Stdout, p, 0, false)
}
|
||||
|
||||
// printPacket recursively renders p and its children to out, one line per
// packet, indented one space per nesting level. When printBytes is true each
// packet's raw encoding is also hex-dumped below its line.
func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) {
	indent_str := ""

	for len(indent_str) != indent {
		indent_str += " "
	}

	class_str := ClassMap[p.ClassType]

	tagtype_str := TypeMap[p.TagType]

	tag_str := fmt.Sprintf("0x%02X", p.Tag)

	// universal tags have well-known names; other classes show the number
	if p.ClassType == ClassUniversal {
		tag_str = tagMap[p.Tag]
	}

	value := fmt.Sprint(p.Value)
	description := ""

	if p.Description != "" {
		description = p.Description + ": "
	}

	fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indent_str, description, class_str, tagtype_str, tag_str, p.Data.Len(), value)

	if printBytes {
		PrintBytes(out, p.Bytes(), indent_str)
	}

	for _, child := range p.Children {
		printPacket(out, child, indent+1, printBytes)
	}
}
|
||||
|
||||
// ReadPacket reads a single Packet from the reader
|
||||
func ReadPacket(reader io.Reader) (*Packet, error) {
|
||||
p, _, err := readPacket(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// DecodeString converts raw BER content octets to a Go string, without any
// character-set translation.
func DecodeString(data []byte) string {
	decoded := string(data)
	return decoded
}
|
||||
|
||||
// ParseInt64 decodes a big-endian two's-complement integer of at most 8
// bytes, sign-extending from the top bit of the encoded width. An empty
// input decodes to 0.
func ParseInt64(bytes []byte) (ret int64, err error) {
	if len(bytes) > 8 {
		// more than 64 bits cannot be represented in an int64
		err = fmt.Errorf("integer too large")
		return
	}
	for _, b := range bytes {
		ret = (ret << 8) | int64(b)
	}

	// sign-extend: shift the encoded top bit up to bit 63 and back down
	shift := 64 - uint8(len(bytes))*8
	ret <<= shift
	ret >>= shift
	return
}
|
||||
|
||||
func encodeInteger(i int64) []byte {
|
||||
n := int64Length(i)
|
||||
out := make([]byte, n)
|
||||
|
||||
var j int
|
||||
for ; n > 0; n-- {
|
||||
out[j] = (byte(i >> uint((n-1)*8)))
|
||||
j++
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func int64Length(i int64) (numBytes int) {
|
||||
numBytes = 1
|
||||
|
||||
for i > 127 {
|
||||
numBytes++
|
||||
i >>= 8
|
||||
}
|
||||
|
||||
for i < -128 {
|
||||
numBytes++
|
||||
i >>= 8
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DecodePacket decodes the given bytes into a single Packet
|
||||
// If a decode error is encountered, nil is returned.
|
||||
// DecodePacket decodes the given bytes into a single Packet
// If a decode error is encountered, nil is returned.
func DecodePacket(data []byte) *Packet {
	p, _, _ := readPacket(bytes.NewBuffer(data))

	return p
}
|
||||
|
||||
// DecodePacketErr decodes the given bytes into a single Packet
|
||||
// If a decode error is encountered, nil is returned
|
||||
// DecodePacketErr decodes the given bytes into a single Packet, returning
// the decode error (unlike DecodePacket, which swallows it).
func DecodePacketErr(data []byte) (*Packet, error) {
	p, _, err := readPacket(bytes.NewBuffer(data))
	if err != nil {
		return nil, err
	}
	return p, nil
}
|
||||
|
||||
// readPacket reads a single Packet from the reader, returning the number of bytes read
|
||||
// readPacket reads a single Packet from the reader, returning the number of
// bytes read. Constructed types are decoded recursively (supporting both
// definite and indefinite lengths); universal primitive types additionally
// get their Value field populated.
func readPacket(reader io.Reader) (*Packet, int, error) {
	identifier, length, read, err := readHeader(reader)
	if err != nil {
		return nil, read, err
	}

	p := &Packet{
		Identifier: identifier,
	}

	p.Data = new(bytes.Buffer)
	p.Children = make([]*Packet, 0, 2)
	p.Value = nil

	if p.TagType == TypeConstructed {
		// TODO: if universal, ensure tag type is allowed to be constructed

		// Track how much content we've read
		contentRead := 0
		for {
			if length != LengthIndefinite {
				// End if we've read what we've been told to
				if contentRead == length {
					break
				}
				// Detect if a packet boundary didn't fall on the expected length
				if contentRead > length {
					return nil, read, fmt.Errorf("expected to read %d bytes, read %d", length, contentRead)
				}
			}

			// Read the next packet
			child, r, err := readPacket(reader)
			if err != nil {
				return nil, read, err
			}
			contentRead += r
			read += r

			// Test is this is the EOC marker for our packet
			if isEOCPacket(child) {
				if length == LengthIndefinite {
					break
				}
				return nil, read, errors.New("eoc child not allowed with definite length")
			}

			// Append and continue
			p.AppendChild(child)
		}
		return p, read, nil
	}

	// primitive types must carry an explicit length
	if length == LengthIndefinite {
		return nil, read, errors.New("indefinite length used with primitive type")
	}

	// Read definite-length content, bounded by MaxPacketLengthBytes (0 = no limit)
	if MaxPacketLengthBytes > 0 && int64(length) > MaxPacketLengthBytes {
		return nil, read, fmt.Errorf("length %d greater than maximum %d", length, MaxPacketLengthBytes)
	}
	content := make([]byte, length, length)
	if length > 0 {
		_, err := io.ReadFull(reader, content)
		if err != nil {
			// a bare EOF here means the stream was truncated mid-packet
			if err == io.EOF {
				return nil, read, io.ErrUnexpectedEOF
			}
			return nil, read, err
		}
		read += length
	}

	if p.ClassType == ClassUniversal {
		p.Data.Write(content)
		p.ByteValue = content

		// decode Value for the universal tags that have a natural Go mapping;
		// all other universal tags keep only the raw bytes
		switch p.Tag {
		case TagEOC:
		case TagBoolean:
			val, _ := ParseInt64(content)

			p.Value = val != 0
		case TagInteger:
			p.Value, _ = ParseInt64(content)
		case TagBitString:
		case TagOctetString:
			// the actual string encoding is not known here
			// (e.g. for LDAP content is already an UTF8-encoded
			// string). Return the data without further processing
			p.Value = DecodeString(content)
		case TagNULL:
		case TagObjectIdentifier:
		case TagObjectDescriptor:
		case TagExternal:
		case TagRealFloat:
		case TagEnumerated:
			p.Value, _ = ParseInt64(content)
		case TagEmbeddedPDV:
		case TagUTF8String:
			p.Value = DecodeString(content)
		case TagRelativeOID:
		case TagSequence:
		case TagSet:
		case TagNumericString:
		case TagPrintableString:
			p.Value = DecodeString(content)
		case TagT61String:
		case TagVideotexString:
		case TagIA5String:
		case TagUTCTime:
		case TagGeneralizedTime:
		case TagGraphicString:
		case TagVisibleString:
		case TagGeneralString:
		case TagUniversalString:
		case TagCharacterString:
		case TagBMPString:
		}
	} else {
		p.Data.Write(content)
	}

	return p, read, nil
}
|
||||
|
||||
func (p *Packet) Bytes() []byte {
|
||||
var out bytes.Buffer
|
||||
|
||||
out.Write(encodeIdentifier(p.Identifier))
|
||||
out.Write(encodeLength(p.Data.Len()))
|
||||
out.Write(p.Data.Bytes())
|
||||
|
||||
return out.Bytes()
|
||||
}
|
||||
|
||||
func (p *Packet) AppendChild(child *Packet) {
|
||||
p.Data.Write(child.Bytes())
|
||||
p.Children = append(p.Children, child)
|
||||
}
|
||||
|
||||
func Encode(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
|
||||
p := new(Packet)
|
||||
|
||||
p.ClassType = ClassType
|
||||
p.TagType = TagType
|
||||
p.Tag = Tag
|
||||
p.Data = new(bytes.Buffer)
|
||||
|
||||
p.Children = make([]*Packet, 0, 2)
|
||||
|
||||
p.Value = Value
|
||||
p.Description = Description
|
||||
|
||||
if Value != nil {
|
||||
v := reflect.ValueOf(Value)
|
||||
|
||||
if ClassType == ClassUniversal {
|
||||
switch Tag {
|
||||
case TagOctetString:
|
||||
sv, ok := v.Interface().(string)
|
||||
|
||||
if ok {
|
||||
p.Data.Write([]byte(sv))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func NewSequence(Description string) *Packet {
|
||||
return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, Description)
|
||||
}
|
||||
|
||||
func NewBoolean(ClassType Class, TagType Type, Tag Tag, Value bool, Description string) *Packet {
|
||||
intValue := int64(0)
|
||||
|
||||
if Value {
|
||||
intValue = 1
|
||||
}
|
||||
|
||||
p := Encode(ClassType, TagType, Tag, nil, Description)
|
||||
|
||||
p.Value = Value
|
||||
p.Data.Write(encodeInteger(intValue))
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
|
||||
p := Encode(ClassType, TagType, Tag, nil, Description)
|
||||
|
||||
p.Value = Value
|
||||
switch v := Value.(type) {
|
||||
case int:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case uint:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case int64:
|
||||
p.Data.Write(encodeInteger(v))
|
||||
case uint64:
|
||||
// TODO : check range or add encodeUInt...
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case int32:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case uint32:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case int16:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case uint16:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case int8:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
case uint8:
|
||||
p.Data.Write(encodeInteger(int64(v)))
|
||||
default:
|
||||
// TODO : add support for big.Int ?
|
||||
panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v))
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func NewString(ClassType Class, TagType Type, Tag Tag, Value, Description string) *Packet {
|
||||
p := Encode(ClassType, TagType, Tag, nil, Description)
|
||||
|
||||
p.Value = Value
|
||||
p.Data.Write([]byte(Value))
|
||||
|
||||
return p
|
||||
}
|
|
@ -0,0 +1,25 @@
|
|||
package ber
|
||||
|
||||
// encodeUnsignedInteger serializes i big-endian using the minimum number of
// bytes; zero still produces a single 0x00 octet.
func encodeUnsignedInteger(i uint64) []byte {
	size := uint64Length(i)
	out := make([]byte, size)

	for idx := range out {
		shift := uint((size - 1 - idx) * 8)
		out[idx] = byte(i >> shift)
	}

	return out
}

// uint64Length reports how many bytes a big-endian encoding of i needs;
// a value of zero still requires one byte.
func uint64Length(i uint64) (numBytes int) {
	numBytes = 1
	for i > 255 {
		numBytes++
		i >>= 8
	}
	return numBytes
}
|
|
@ -0,0 +1,35 @@
|
|||
package ber
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) {
|
||||
if i, c, err := readIdentifier(reader); err != nil {
|
||||
return Identifier{}, 0, read, err
|
||||
} else {
|
||||
identifier = i
|
||||
read += c
|
||||
}
|
||||
|
||||
if l, c, err := readLength(reader); err != nil {
|
||||
return Identifier{}, 0, read, err
|
||||
} else {
|
||||
length = l
|
||||
read += c
|
||||
}
|
||||
|
||||
// Validate length type with identifier (x.600, 8.1.3.2.a)
|
||||
if length == LengthIndefinite && identifier.TagType == TypePrimitive {
|
||||
return Identifier{}, 0, read, errors.New("indefinite length used with primitive type")
|
||||
}
|
||||
|
||||
if length < LengthIndefinite {
|
||||
err = fmt.Errorf("length cannot be less than %d", LengthIndefinite)
|
||||
return
|
||||
}
|
||||
|
||||
return identifier, length, read, nil
|
||||
}
|
|
@ -0,0 +1,112 @@
|
|||
package ber
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func readIdentifier(reader io.Reader) (Identifier, int, error) {
|
||||
identifier := Identifier{}
|
||||
read := 0
|
||||
|
||||
// identifier byte
|
||||
b, err := readByte(reader)
|
||||
if err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("error reading identifier byte: %v\n", err)
|
||||
}
|
||||
return Identifier{}, read, err
|
||||
}
|
||||
read++
|
||||
|
||||
identifier.ClassType = Class(b) & ClassBitmask
|
||||
identifier.TagType = Type(b) & TypeBitmask
|
||||
|
||||
if tag := Tag(b) & TagBitmask; tag != HighTag {
|
||||
// short-form tag
|
||||
identifier.Tag = tag
|
||||
return identifier, read, nil
|
||||
}
|
||||
|
||||
// high-tag-number tag
|
||||
tagBytes := 0
|
||||
for {
|
||||
b, err := readByte(reader)
|
||||
if err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err)
|
||||
}
|
||||
return Identifier{}, read, err
|
||||
}
|
||||
tagBytes++
|
||||
read++
|
||||
|
||||
// Lowest 7 bits get appended to the tag value (x.690, 8.1.2.4.2.b)
|
||||
identifier.Tag <<= 7
|
||||
identifier.Tag |= Tag(b) & HighTagValueBitmask
|
||||
|
||||
// First byte may not be all zeros (x.690, 8.1.2.4.2.c)
|
||||
if tagBytes == 1 && identifier.Tag == 0 {
|
||||
return Identifier{}, read, errors.New("invalid first high-tag-number tag byte")
|
||||
}
|
||||
// Overflow of int64
|
||||
// TODO: support big int tags?
|
||||
if tagBytes > 9 {
|
||||
return Identifier{}, read, errors.New("high-tag-number tag overflow")
|
||||
}
|
||||
|
||||
// Top bit of 0 means this is the last byte in the high-tag-number tag (x.690, 8.1.2.4.2.a)
|
||||
if Tag(b)&HighTagContinueBitmask == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return identifier, read, nil
|
||||
}
|
||||
|
||||
func encodeIdentifier(identifier Identifier) []byte {
|
||||
b := []byte{0x0}
|
||||
b[0] |= byte(identifier.ClassType)
|
||||
b[0] |= byte(identifier.TagType)
|
||||
|
||||
if identifier.Tag < HighTag {
|
||||
// Short-form
|
||||
b[0] |= byte(identifier.Tag)
|
||||
} else {
|
||||
// high-tag-number
|
||||
b[0] |= byte(HighTag)
|
||||
|
||||
tag := identifier.Tag
|
||||
|
||||
b = append(b, encodeHighTag(tag)...)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func encodeHighTag(tag Tag) []byte {
|
||||
// set cap=4 to hopefully avoid additional allocations
|
||||
b := make([]byte, 0, 4)
|
||||
for tag != 0 {
|
||||
// t := last 7 bits of tag (HighTagValueBitmask = 0x7F)
|
||||
t := tag & HighTagValueBitmask
|
||||
|
||||
// right shift tag 7 to remove what was just pulled off
|
||||
tag >>= 7
|
||||
|
||||
// if b already has entries this entry needs a continuation bit (0x80)
|
||||
if len(b) != 0 {
|
||||
t |= HighTagContinueBitmask
|
||||
}
|
||||
|
||||
b = append(b, byte(t))
|
||||
}
|
||||
// reverse
|
||||
// since bits were pulled off 'tag' small to high the byte slice is in reverse order.
|
||||
// example: tag = 0xFF results in {0x7F, 0x01 + 0x80 (continuation bit)}
|
||||
// this needs to be reversed into 0x81 0x7F
|
||||
for i, j := 0, len(b)-1; i < len(b)/2; i++ {
|
||||
b[i], b[j-i] = b[j-i], b[i]
|
||||
}
|
||||
return b
|
||||
}
|
|
@ -0,0 +1,81 @@
|
|||
package ber
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func readLength(reader io.Reader) (length int, read int, err error) {
|
||||
// length byte
|
||||
b, err := readByte(reader)
|
||||
if err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("error reading length byte: %v\n", err)
|
||||
}
|
||||
return 0, 0, err
|
||||
}
|
||||
read++
|
||||
|
||||
switch {
|
||||
case b == 0xFF:
|
||||
// Invalid 0xFF (x.600, 8.1.3.5.c)
|
||||
return 0, read, errors.New("invalid length byte 0xff")
|
||||
|
||||
case b == LengthLongFormBitmask:
|
||||
// Indefinite form, we have to decode packets until we encounter an EOC packet (x.600, 8.1.3.6)
|
||||
length = LengthIndefinite
|
||||
|
||||
case b&LengthLongFormBitmask == 0:
|
||||
// Short definite form, extract the length from the bottom 7 bits (x.600, 8.1.3.4)
|
||||
length = int(b) & LengthValueBitmask
|
||||
|
||||
case b&LengthLongFormBitmask != 0:
|
||||
// Long definite form, extract the number of length bytes to follow from the bottom 7 bits (x.600, 8.1.3.5.b)
|
||||
lengthBytes := int(b) & LengthValueBitmask
|
||||
// Protect against overflow
|
||||
// TODO: support big int length?
|
||||
if lengthBytes > 8 {
|
||||
return 0, read, errors.New("long-form length overflow")
|
||||
}
|
||||
|
||||
// Accumulate into a 64-bit variable
|
||||
var length64 int64
|
||||
for i := 0; i < lengthBytes; i++ {
|
||||
b, err = readByte(reader)
|
||||
if err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("error reading long-form length byte %d: %v\n", i, err)
|
||||
}
|
||||
return 0, read, err
|
||||
}
|
||||
read++
|
||||
|
||||
// x.600, 8.1.3.5
|
||||
length64 <<= 8
|
||||
length64 |= int64(b)
|
||||
}
|
||||
|
||||
// Cast to a platform-specific integer
|
||||
length = int(length64)
|
||||
// Ensure we didn't overflow
|
||||
if int64(length) != length64 {
|
||||
return 0, read, errors.New("long-form length overflow")
|
||||
}
|
||||
|
||||
default:
|
||||
return 0, read, errors.New("invalid length byte")
|
||||
}
|
||||
|
||||
return length, read, nil
|
||||
}
|
||||
|
||||
func encodeLength(length int) []byte {
|
||||
length_bytes := encodeUnsignedInteger(uint64(length))
|
||||
if length > 127 || len(length_bytes) > 1 {
|
||||
longFormBytes := []byte{(LengthLongFormBitmask | byte(len(length_bytes)))}
|
||||
longFormBytes = append(longFormBytes, length_bytes...)
|
||||
length_bytes = longFormBytes
|
||||
}
|
||||
return length_bytes
|
||||
}
|
|
@ -0,0 +1,24 @@
|
|||
package ber
|
||||
|
||||
import "io"
|
||||
|
||||
// readByte reads exactly one byte from reader, mapping a bare io.EOF to
// io.ErrUnexpectedEOF because the caller always expects the byte to exist.
func readByte(reader io.Reader) (byte, error) {
	buf := make([]byte, 1)
	if _, err := io.ReadFull(reader, buf); err != nil {
		if err == io.EOF {
			return 0, io.ErrUnexpectedEOF
		}
		return 0, err
	}
	return buf[0], nil
}
|
||||
|
||||
func isEOCPacket(p *Packet) bool {
|
||||
return p != nil &&
|
||||
p.Tag == TagEOC &&
|
||||
p.ClassType == ClassUniversal &&
|
||||
p.TagType == TypePrimitive &&
|
||||
len(p.ByteValue) == 0 &&
|
||||
len(p.Children) == 0
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
language: go
|
||||
env:
|
||||
global:
|
||||
- VET_VERSIONS="1.6 1.7 1.8 1.9 tip"
|
||||
- LINT_VERSIONS="1.6 1.7 1.8 1.9 tip"
|
||||
go:
|
||||
- 1.2
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- 1.7
|
||||
- 1.8
|
||||
- 1.9
|
||||
- tip
|
||||
matrix:
|
||||
fast_finish: true
|
||||
allow_failures:
|
||||
- go: tip
|
||||
go_import_path: gopkg.in/ldap.v2
|
||||
install:
|
||||
- go get gopkg.in/asn1-ber.v1
|
||||
- go get gopkg.in/ldap.v2
|
||||
- go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/golang/lint/golint || true
|
||||
- go build -v ./...
|
||||
script:
|
||||
- make test
|
||||
- make fmt
|
||||
- if [[ "$VET_VERSIONS" == *"$TRAVIS_GO_VERSION"* ]]; then make vet; fi
|
||||
- if [[ "$LINT_VERSIONS" == *"$TRAVIS_GO_VERSION"* ]]; then make lint; fi
|
|
@ -0,0 +1,22 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
|
||||
Portions copyright (c) 2015-2016 go-ldap Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,52 @@
|
|||
.PHONY: default install build test quicktest fmt vet lint
|
||||
|
||||
GO_VERSION := $(shell go version | cut -d' ' -f3 | cut -d. -f2)
|
||||
|
||||
# Only use the `-race` flag on newer versions of Go
|
||||
IS_OLD_GO := $(shell test $(GO_VERSION) -le 2 && echo true)
|
||||
ifeq ($(IS_OLD_GO),true)
|
||||
RACE_FLAG :=
|
||||
else
|
||||
RACE_FLAG := -race -cpu 1,2,4
|
||||
endif
|
||||
|
||||
default: fmt vet lint build quicktest
|
||||
|
||||
install:
|
||||
go get -t -v ./...
|
||||
|
||||
build:
|
||||
go build -v ./...
|
||||
|
||||
test:
|
||||
go test -v $(RACE_FLAG) -cover ./...
|
||||
|
||||
quicktest:
|
||||
go test ./...
|
||||
|
||||
# Capture output and force failure when there is non-empty output
|
||||
fmt:
|
||||
@echo gofmt -l .
|
||||
@OUTPUT=`gofmt -l . 2>&1`; \
|
||||
if [ "$$OUTPUT" ]; then \
|
||||
echo "gofmt must be run on the following files:"; \
|
||||
echo "$$OUTPUT"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
# Only run on go1.5+
|
||||
vet:
|
||||
go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult .
|
||||
|
||||
# https://github.com/golang/lint
|
||||
# go get github.com/golang/lint/golint
|
||||
# Capture output and force failure when there is non-empty output
|
||||
# Only run on go1.5+
|
||||
lint:
|
||||
@echo golint ./...
|
||||
@OUTPUT=`golint ./... 2>&1`; \
|
||||
if [ "$$OUTPUT" ]; then \
|
||||
echo "golint errors:"; \
|
||||
echo "$$OUTPUT"; \
|
||||
exit 1; \
|
||||
fi
|
|
@ -0,0 +1,53 @@
|
|||
[![GoDoc](https://godoc.org/gopkg.in/ldap.v2?status.svg)](https://godoc.org/gopkg.in/ldap.v2)
|
||||
[![Build Status](https://travis-ci.org/go-ldap/ldap.svg)](https://travis-ci.org/go-ldap/ldap)
|
||||
|
||||
# Basic LDAP v3 functionality for the Go programming language.
|
||||
|
||||
## Install
|
||||
|
||||
For the latest version use:
|
||||
|
||||
go get gopkg.in/ldap.v2
|
||||
|
||||
Import the latest version with:
|
||||
|
||||
import "gopkg.in/ldap.v2"
|
||||
|
||||
## Required Libraries:
|
||||
|
||||
- gopkg.in/asn1-ber.v1
|
||||
|
||||
## Features:
|
||||
|
||||
- Connecting to LDAP server (non-TLS, TLS, STARTTLS)
|
||||
- Binding to LDAP server
|
||||
- Searching for entries
|
||||
- Filter Compile / Decompile
|
||||
- Paging Search Results
|
||||
- Modify Requests / Responses
|
||||
- Add Requests / Responses
|
||||
- Delete Requests / Responses
|
||||
|
||||
## Examples:
|
||||
|
||||
- search
|
||||
- modify
|
||||
|
||||
## Contributing:
|
||||
|
||||
Bug reports and pull requests are welcome!
|
||||
|
||||
Before submitting a pull request, please make sure tests and verification scripts pass:
|
||||
```
|
||||
make all
|
||||
```
|
||||
|
||||
To set up a pre-push hook to run the tests and verify scripts before pushing:
|
||||
```
|
||||
ln -s ../../.githooks/pre-push .git/hooks/pre-push
|
||||
```
|
||||
|
||||
---
|
||||
The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
|
||||
The design is licensed under the Creative Commons 3.0 Attributions license.
|
||||
Read this article for more details: http://blog.golang.org/gopher
|
|
@ -0,0 +1,113 @@
|
|||
//
|
||||
// https://tools.ietf.org/html/rfc4511
|
||||
//
|
||||
// AddRequest ::= [APPLICATION 8] SEQUENCE {
|
||||
// entry LDAPDN,
|
||||
// attributes AttributeList }
|
||||
//
|
||||
// AttributeList ::= SEQUENCE OF attribute Attribute
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// Attribute represents an LDAP attribute
|
||||
type Attribute struct {
|
||||
// Type is the name of the LDAP attribute
|
||||
Type string
|
||||
// Vals are the LDAP attribute values
|
||||
Vals []string
|
||||
}
|
||||
|
||||
func (a *Attribute) encode() *ber.Packet {
|
||||
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute")
|
||||
seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type"))
|
||||
set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
|
||||
for _, value := range a.Vals {
|
||||
set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
|
||||
}
|
||||
seq.AppendChild(set)
|
||||
return seq
|
||||
}
|
||||
|
||||
// AddRequest represents an LDAP AddRequest operation
|
||||
type AddRequest struct {
|
||||
// DN identifies the entry being added
|
||||
DN string
|
||||
// Attributes list the attributes of the new entry
|
||||
Attributes []Attribute
|
||||
}
|
||||
|
||||
func (a AddRequest) encode() *ber.Packet {
|
||||
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request")
|
||||
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.DN, "DN"))
|
||||
attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
|
||||
for _, attribute := range a.Attributes {
|
||||
attributes.AppendChild(attribute.encode())
|
||||
}
|
||||
request.AppendChild(attributes)
|
||||
return request
|
||||
}
|
||||
|
||||
// Attribute adds an attribute with the given type and values
|
||||
func (a *AddRequest) Attribute(attrType string, attrVals []string) {
|
||||
a.Attributes = append(a.Attributes, Attribute{Type: attrType, Vals: attrVals})
|
||||
}
|
||||
|
||||
// NewAddRequest returns an AddRequest for the given DN, with no attributes
|
||||
func NewAddRequest(dn string) *AddRequest {
|
||||
return &AddRequest{
|
||||
DN: dn,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Add performs the given AddRequest
func (l *Conn) Add(addRequest *AddRequest) error {
	// Envelope: LDAPMessage ::= SEQUENCE { messageID, protocolOp }
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
	packet.AppendChild(addRequest.encode())

	l.Debug.PrintPacket(packet)

	msgCtx, err := l.sendMessage(packet)
	if err != nil {
		return err
	}
	defer l.finishMessage(msgCtx)

	l.Debug.Printf("%d: waiting for response", msgCtx.id)
	// A closed responses channel means the connection went away before a
	// reply for this message ID arrived.
	packetResponse, ok := <-msgCtx.responses
	if !ok {
		return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
	}
	packet, err = packetResponse.ReadPacket()
	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
	if err != nil {
		return err
	}

	if l.Debug {
		if err := addLDAPDescriptions(packet); err != nil {
			return err
		}
		ber.PrintPacket(packet)
	}

	// Children[1] is the protocolOp of the response envelope.
	if packet.Children[1].Tag == ApplicationAddResponse {
		resultCode, resultDescription := getLDAPResultCode(packet)
		if resultCode != 0 {
			return NewError(resultCode, errors.New(resultDescription))
		}
	} else {
		// An unexpected op is logged but deliberately not treated as an error.
		log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
	}

	l.Debug.Printf("%d: returning", msgCtx.id)
	return nil
}
|
|
@ -0,0 +1,13 @@
|
|||
// +build go1.4
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// For compilers that support it, we just use the underlying sync/atomic.Value
|
||||
// type.
|
||||
type atomicValue struct {
|
||||
atomic.Value
|
||||
}
|
|
@ -0,0 +1,28 @@
|
|||
// +build !go1.4
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// This is a helper type that emulates the use of the "sync/atomic.Value"
// struct that's available in Go 1.4 and up: a single interface{} slot
// guarded by a read/write mutex.
type atomicValue struct {
	value interface{}
	lock  sync.RWMutex
}

// Store replaces the held value under the write lock.
func (av *atomicValue) Store(val interface{}) {
	av.lock.Lock()
	defer av.lock.Unlock()
	av.value = val
}

// Load returns the most recently stored value (nil if nothing was stored)
// under the read lock.
func (av *atomicValue) Load() interface{} {
	av.lock.RLock()
	defer av.lock.RUnlock()
	return av.value
}
|
|
@ -0,0 +1,143 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// SimpleBindRequest represents a username/password bind operation
|
||||
type SimpleBindRequest struct {
|
||||
// Username is the name of the Directory object that the client wishes to bind as
|
||||
Username string
|
||||
// Password is the credentials to bind with
|
||||
Password string
|
||||
// Controls are optional controls to send with the bind request
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
// SimpleBindResult contains the response from the server
|
||||
type SimpleBindResult struct {
|
||||
Controls []Control
|
||||
}
|
||||
|
||||
// NewSimpleBindRequest returns a bind request
|
||||
func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest {
|
||||
return &SimpleBindRequest{
|
||||
Username: username,
|
||||
Password: password,
|
||||
Controls: controls,
|
||||
}
|
||||
}
|
||||
|
||||
func (bindRequest *SimpleBindRequest) encode() *ber.Packet {
|
||||
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
|
||||
request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
|
||||
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, bindRequest.Username, "User Name"))
|
||||
request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, bindRequest.Password, "Password"))
|
||||
|
||||
request.AppendChild(encodeControls(bindRequest.Controls))
|
||||
|
||||
return request
|
||||
}
|
||||
|
||||
// SimpleBind performs the simple bind operation defined in the given request
func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) {
	// Envelope: LDAPMessage ::= SEQUENCE { messageID, protocolOp }
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
	encodedBindRequest := simpleBindRequest.encode()
	packet.AppendChild(encodedBindRequest)

	if l.Debug {
		ber.PrintPacket(packet)
	}

	msgCtx, err := l.sendMessage(packet)
	if err != nil {
		return nil, err
	}
	defer l.finishMessage(msgCtx)

	// A closed responses channel means the connection went away before a
	// reply for this message ID arrived.
	packetResponse, ok := <-msgCtx.responses
	if !ok {
		return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
	}
	packet, err = packetResponse.ReadPacket()
	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
	if err != nil {
		return nil, err
	}

	if l.Debug {
		if err := addLDAPDescriptions(packet); err != nil {
			return nil, err
		}
		ber.PrintPacket(packet)
	}

	result := &SimpleBindResult{
		Controls: make([]Control, 0),
	}

	// A third child on the response envelope carries any response controls.
	if len(packet.Children) == 3 {
		for _, child := range packet.Children[2].Children {
			result.Controls = append(result.Controls, DecodeControl(child))
		}
	}

	resultCode, resultDescription := getLDAPResultCode(packet)
	if resultCode != 0 {
		// The partially-populated result is returned alongside the error so
		// callers can still inspect any controls.
		return result, NewError(resultCode, errors.New(resultDescription))
	}

	return result, nil
}
|
||||
|
||||
// Bind performs a bind with the given username and password
func (l *Conn) Bind(username, password string) error {
	// Envelope: LDAPMessage ::= SEQUENCE { messageID, protocolOp }
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
	// BindRequest ::= [APPLICATION 0]: protocol version 3, bind DN, and the
	// password as a context-specific (tag 0) simple credential. Unlike
	// SimpleBind, no controls element is sent.
	bindRequest := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
	bindRequest.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
	bindRequest.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, username, "User Name"))
	bindRequest.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, password, "Password"))
	packet.AppendChild(bindRequest)

	if l.Debug {
		ber.PrintPacket(packet)
	}

	msgCtx, err := l.sendMessage(packet)
	if err != nil {
		return err
	}
	defer l.finishMessage(msgCtx)

	// A closed responses channel means the connection went away before a
	// reply for this message ID arrived.
	packetResponse, ok := <-msgCtx.responses
	if !ok {
		return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
	}
	packet, err = packetResponse.ReadPacket()
	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
	if err != nil {
		return err
	}

	if l.Debug {
		if err := addLDAPDescriptions(packet); err != nil {
			return err
		}
		ber.PrintPacket(packet)
	}

	resultCode, resultDescription := getLDAPResultCode(packet)
	if resultCode != 0 {
		return NewError(resultCode, errors.New(resultDescription))
	}

	return nil
}
|
|
@ -0,0 +1,27 @@
|
|||
package ldap
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Client knows how to interact with an LDAP server
type Client interface {
	// Connection management
	Start()
	StartTLS(config *tls.Config) error
	Close()
	SetTimeout(time.Duration)

	// Authentication
	Bind(username, password string) error
	SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error)

	// Entry modification
	Add(addRequest *AddRequest) error
	Del(delRequest *DelRequest) error
	Modify(modifyRequest *ModifyRequest) error

	// Attribute comparison and password management
	Compare(dn, attribute, value string) (bool, error)
	PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error)

	// Searching
	Search(searchRequest *SearchRequest) (*SearchResult, error)
	SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error)
}
|
|
@ -0,0 +1,85 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
//
|
||||
// File contains Compare functionality
|
||||
//
|
||||
// https://tools.ietf.org/html/rfc4511
|
||||
//
|
||||
// CompareRequest ::= [APPLICATION 14] SEQUENCE {
|
||||
// entry LDAPDN,
|
||||
// ava AttributeValueAssertion }
|
||||
//
|
||||
// AttributeValueAssertion ::= SEQUENCE {
|
||||
// attributeDesc AttributeDescription,
|
||||
// assertionValue AssertionValue }
|
||||
//
|
||||
// AttributeDescription ::= LDAPString
|
||||
// -- Constrained to <attributedescription>
|
||||
// -- [RFC4512]
|
||||
//
|
||||
// AttributeValue ::= OCTET STRING
|
||||
//
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// Compare checks to see if the attribute of the dn matches value. It returns
// true on LDAPResultCompareTrue, false on LDAPResultCompareFalse, and false
// together with an error for any other result code or transport failure.
func (l *Conn) Compare(dn, attribute, value string) (bool, error) {
	// Envelope: LDAPMessage ::= SEQUENCE { messageID, protocolOp }
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))

	// CompareRequest ::= [APPLICATION 14] SEQUENCE { entry, ava }
	request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request")
	request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, dn, "DN"))

	ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion")
	ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "AttributeDesc"))
	// NOTE(review): the assertion value is built via ber.Encode with a
	// constructed octet-string tag rather than ber.NewString — presumably
	// relying on Encode writing string values; confirm against the ber package.
	ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagOctetString, value, "AssertionValue"))
	request.AppendChild(ava)
	packet.AppendChild(request)

	l.Debug.PrintPacket(packet)

	msgCtx, err := l.sendMessage(packet)
	if err != nil {
		return false, err
	}
	defer l.finishMessage(msgCtx)

	l.Debug.Printf("%d: waiting for response", msgCtx.id)
	// A closed responses channel means the connection went away before a
	// reply for this message ID arrived.
	packetResponse, ok := <-msgCtx.responses
	if !ok {
		return false, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
	}
	packet, err = packetResponse.ReadPacket()
	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
	if err != nil {
		return false, err
	}

	if l.Debug {
		if err := addLDAPDescriptions(packet); err != nil {
			return false, err
		}
		ber.PrintPacket(packet)
	}

	// Children[1] is the protocolOp of the response envelope.
	if packet.Children[1].Tag == ApplicationCompareResponse {
		resultCode, resultDescription := getLDAPResultCode(packet)
		if resultCode == LDAPResultCompareTrue {
			return true, nil
		} else if resultCode == LDAPResultCompareFalse {
			return false, nil
		} else {
			return false, NewError(resultCode, errors.New(resultDescription))
		}
	}
	return false, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag)
}
|
|
@ -0,0 +1,470 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// Operation codes carried in messagePacket.Op, driving the
// processMessages() event loop.
const (
	// MessageQuit causes the processMessages loop to exit
	MessageQuit = 0
	// MessageRequest sends a request to the server
	MessageRequest = 1
	// MessageResponse receives a response from the server
	MessageResponse = 2
	// MessageFinish indicates the client considers a particular message ID to be finished
	MessageFinish = 3
	// MessageTimeout indicates the client-specified timeout for a particular message ID has been reached
	MessageTimeout = 4
)
|
||||
|
||||
// PacketResponse contains the packet or error encountered reading a response
|
||||
// PacketResponse contains the packet or error encountered reading a response
type PacketResponse struct {
	// Packet is the packet read from the server
	Packet *ber.Packet
	// Error is an error encountered while reading
	Error error
}

// ReadPacket returns the packet or an error.
// A nil receiver, or a response carrying neither a packet nor an error,
// is reported as a network-level failure to retrieve a response.
func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) {
	if (pr == nil) || (pr.Packet == nil && pr.Error == nil) {
		return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response"))
	}
	return pr.Packet, pr.Error
}
|
||||
|
||||
// messageContext tracks a single in-flight request/response exchange.
type messageContext struct {
	// id is the LDAP message ID of the exchange
	id int64
	// close(done) should only be called from finishMessage()
	done chan struct{}
	// close(responses) should only be called from processMessages(), and only sent to from sendResponse()
	responses chan *PacketResponse
}

// sendResponse should only be called within the processMessages() loop which
// is also responsible for closing the responses channel.
// The select prevents a deadlock against a caller that has already finished:
// once done is closed the packet is simply dropped.
func (msgCtx *messageContext) sendResponse(packet *PacketResponse) {
	select {
	case msgCtx.responses <- packet:
		// Successfully sent packet to message handler.
	case <-msgCtx.done:
		// The request handler is done and will not receive more
		// packets.
	}
}
|
||||
|
||||
// messagePacket is the unit of work passed over Conn.chanMessage to the
// processMessages() goroutine.
type messagePacket struct {
	// Op is one of the Message* opcodes above
	Op int
	// MessageID identifies which exchange the packet belongs to
	MessageID int64
	// Packet is the request/response payload (nil for quit/finish/timeout ops)
	Packet *ber.Packet
	// Context carries the per-request channels (set only for MessageRequest)
	Context *messageContext
}

// sendMessageFlags modify how sendMessageWithFlags dispatches a request.
type sendMessageFlags uint

const (
	// startTLS marks the request as the StartTLS handshake, which must be
	// the only outstanding request on the connection.
	startTLS sendMessageFlags = 1 << iota
)
|
||||
|
||||
// Conn represents an LDAP Connection
|
||||
// Conn represents an LDAP Connection
type Conn struct {
	// conn is the underlying network (possibly TLS) connection
	conn net.Conn
	// isTLS is true once the connection is encrypted
	isTLS bool
	// closing is set to 1 (atomically) when shutdown begins
	closing uint32
	// closeErr records the read error that triggered an unclean shutdown
	closeErr atomicValue
	// isStartingTLS guards the window during the StartTLS handshake
	isStartingTLS bool
	// Debug enables packet/trace logging when true
	Debug debugging
	// chanConfirm is closed by processMessages() when it exits
	chanConfirm chan struct{}
	// messageContexts maps outstanding message IDs to their contexts;
	// owned exclusively by the processMessages() goroutine
	messageContexts map[int64]*messageContext
	// chanMessage carries work items to processMessages()
	chanMessage chan *messagePacket
	// chanMessageID hands out sequential message IDs
	chanMessageID chan int64
	// wgClose lets concurrent Close() callers wait for shutdown to finish
	wgClose sync.WaitGroup
	// outstandingRequests counts in-flight requests (guarded by messageMutex)
	outstandingRequests uint
	// messageMutex guards isStartingTLS, outstandingRequests and the
	// send-vs-close ordering on chanMessage
	messageMutex sync.Mutex
	// requestTimeout is the per-request timeout in nanoseconds (accessed atomically)
	requestTimeout int64
}

// Compile-time check that *Conn satisfies the Client interface.
var _ Client = &Conn{}

// DefaultTimeout is a package-level variable that sets the timeout value
// used for the Dial and DialTLS methods.
//
// WARNING: since this is a package-level variable, setting this value from
// multiple places will probably result in undesired behaviour.
var DefaultTimeout = 60 * time.Second
|
||||
|
||||
// Dial connects to the given address on the given network using net.Dial
|
||||
// and then returns a new Conn for the connection.
|
||||
func Dial(network, addr string) (*Conn, error) {
|
||||
c, err := net.DialTimeout(network, addr, DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorNetwork, err)
|
||||
}
|
||||
conn := NewConn(c, false)
|
||||
conn.Start()
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// DialTLS connects to the given address on the given network using tls.Dial
|
||||
// and then returns a new Conn for the connection.
|
||||
func DialTLS(network, addr string, config *tls.Config) (*Conn, error) {
|
||||
dc, err := net.DialTimeout(network, addr, DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorNetwork, err)
|
||||
}
|
||||
c := tls.Client(dc, config)
|
||||
err = c.Handshake()
|
||||
if err != nil {
|
||||
// Handshake error, close the established connection before we return an error
|
||||
dc.Close()
|
||||
return nil, NewError(ErrorNetwork, err)
|
||||
}
|
||||
conn := NewConn(c, true)
|
||||
conn.Start()
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// NewConn returns a new Conn using conn for network I/O.
|
||||
func NewConn(conn net.Conn, isTLS bool) *Conn {
|
||||
return &Conn{
|
||||
conn: conn,
|
||||
chanConfirm: make(chan struct{}),
|
||||
chanMessageID: make(chan int64),
|
||||
chanMessage: make(chan *messagePacket, 10),
|
||||
messageContexts: map[int64]*messageContext{},
|
||||
requestTimeout: 0,
|
||||
isTLS: isTLS,
|
||||
}
|
||||
}
|
||||
|
||||
// Start initializes goroutines to read responses and process messages
|
||||
// Start initializes goroutines to read responses and process messages.
func (l *Conn) Start() {
	go l.reader()
	go l.processMessages()
	// Released in Close() once shutdown completes; other Close() callers
	// block on wgClose.Wait().
	l.wgClose.Add(1)
}

// isClosing returns whether or not we're currently closing.
func (l *Conn) isClosing() bool {
	return atomic.LoadUint32(&l.closing) == 1
}

// setClosing sets the closing value to true. It reports true only for the
// caller that performed the 0->1 transition, so shutdown runs exactly once.
func (l *Conn) setClosing() bool {
	return atomic.CompareAndSwapUint32(&l.closing, 0, 1)
}
|
||||
|
||||
// Close closes the connection.
|
||||
// Close closes the connection.
// Safe to call multiple times: only the caller that wins setClosing()
// performs the shutdown; everyone else blocks on wgClose.Wait() until the
// shutdown has completed.
func (l *Conn) Close() {
	l.messageMutex.Lock()
	defer l.messageMutex.Unlock()

	if l.setClosing() {
		l.Debug.Printf("Sending quit message and waiting for confirmation")
		l.chanMessage <- &messagePacket{Op: MessageQuit}
		// chanConfirm is closed by processMessages() when it exits, so this
		// receive unblocks once the loop has drained its state.
		<-l.chanConfirm
		close(l.chanMessage)

		l.Debug.Printf("Closing network connection")
		if err := l.conn.Close(); err != nil {
			log.Println(err)
		}

		l.wgClose.Done()
	}
	l.wgClose.Wait()
}
|
||||
|
||||
// SetTimeout sets the time after a request is sent that a MessageTimeout triggers
|
||||
// SetTimeout sets the time after a request is sent that a MessageTimeout triggers.
// Non-positive values are ignored (the current timeout is left unchanged).
func (l *Conn) SetTimeout(timeout time.Duration) {
	if timeout > 0 {
		atomic.StoreInt64(&l.requestTimeout, int64(timeout))
	}
}

// Returns the next available messageID.
// Returns 0 once the ID channel has been closed (connection shut down).
func (l *Conn) nextMessageID() int64 {
	if messageID, ok := <-l.chanMessageID; ok {
		return messageID
	}
	return 0
}
|
||||
|
||||
// StartTLS sends the command to start a TLS session and then creates a new TLS Client
|
||||
// StartTLS sends the command to start a TLS session and then creates a new TLS Client.
// It fails if the connection is already encrypted or if any other request
// is outstanding; on success the reader goroutine is restarted on the
// encrypted connection.
func (l *Conn) StartTLS(config *tls.Config) error {
	if l.isTLS {
		return NewError(ErrorNetwork, errors.New("ldap: already encrypted"))
	}

	// Build the StartTLS extended request (OID 1.3.6.1.4.1.1466.20037).
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
	request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS")
	request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command"))
	packet.AppendChild(request)
	l.Debug.PrintPacket(packet)

	// The startTLS flag enforces "no outstanding requests" and makes the
	// reader goroutine stop cleanly after this exchange's response arrives.
	msgCtx, err := l.sendMessageWithFlags(packet, startTLS)
	if err != nil {
		return err
	}
	defer l.finishMessage(msgCtx)

	l.Debug.Printf("%d: waiting for response", msgCtx.id)

	packetResponse, ok := <-msgCtx.responses
	if !ok {
		return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
	}
	packet, err = packetResponse.ReadPacket()
	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
	if err != nil {
		return err
	}

	if l.Debug {
		if err := addLDAPDescriptions(packet); err != nil {
			l.Close()
			return err
		}
		ber.PrintPacket(packet)
	}

	if resultCode, message := getLDAPResultCode(packet); resultCode == LDAPResultSuccess {
		// Server accepted: wrap the existing connection in TLS and
		// handshake eagerly so failures surface here.
		conn := tls.Client(l.conn, config)

		if err := conn.Handshake(); err != nil {
			l.Close()
			return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", err))
		}

		l.isTLS = true
		l.conn = conn
	} else {
		return NewError(resultCode, fmt.Errorf("ldap: cannot StartTLS (%s)", message))
	}
	// Restart the reader on the (now encrypted) connection; the previous
	// reader exited cleanly when it observed isStartingTLS.
	go l.reader()

	return nil
}
|
||||
|
||||
// sendMessage queues the packet for transmission with no special flags.
func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) {
	return l.sendMessageWithFlags(packet, 0)
}

// sendMessageWithFlags queues the packet for transmission by the
// processMessages() goroutine and returns the context on which the caller
// receives the response. packet.Children[0] must hold the int64 MessageID.
func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) {
	if l.isClosing() {
		return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
	}
	l.messageMutex.Lock()
	l.Debug.Printf("flags&startTLS = %d", flags&startTLS)
	// No new requests are accepted while a StartTLS handshake is in flight.
	if l.isStartingTLS {
		l.messageMutex.Unlock()
		return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase"))
	}
	if flags&startTLS != 0 {
		// StartTLS must be the only request in flight.
		if l.outstandingRequests != 0 {
			l.messageMutex.Unlock()
			return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests"))
		}
		l.isStartingTLS = true
	}
	l.outstandingRequests++

	l.messageMutex.Unlock()

	responses := make(chan *PacketResponse)
	messageID := packet.Children[0].Value.(int64)
	message := &messagePacket{
		Op:        MessageRequest,
		MessageID: messageID,
		Packet:    packet,
		Context: &messageContext{
			id:        messageID,
			done:      make(chan struct{}),
			responses: responses,
		},
	}
	l.sendProcessMessage(message)
	return message.Context, nil
}
|
||||
|
||||
// finishMessage marks the exchange as complete: it unblocks sendResponse(),
// releases the outstanding-request slot, and asks processMessages() to
// discard the message context.
func (l *Conn) finishMessage(msgCtx *messageContext) {
	close(msgCtx.done)

	if l.isClosing() {
		// Shutdown cleanup is handled by processMessages() itself.
		return
	}

	l.messageMutex.Lock()
	l.outstandingRequests--
	if l.isStartingTLS {
		l.isStartingTLS = false
	}
	l.messageMutex.Unlock()

	message := &messagePacket{
		Op:        MessageFinish,
		MessageID: msgCtx.id,
	}
	l.sendProcessMessage(message)
}

// sendProcessMessage forwards a work item to processMessages(), returning
// false (and dropping the item) when the connection is shutting down.
// Holding messageMutex serializes the send against Close() closing
// chanMessage, preventing a send on a closed channel.
func (l *Conn) sendProcessMessage(message *messagePacket) bool {
	l.messageMutex.Lock()
	defer l.messageMutex.Unlock()
	if l.isClosing() {
		return false
	}
	l.chanMessage <- message
	return true
}
|
||||
|
||||
// processMessages is the single goroutine that owns messageContexts. It
// hands out message IDs, writes requests to the network, routes responses
// back to waiting callers, applies per-request timeouts, and cleans up all
// remaining contexts on shutdown.
func (l *Conn) processMessages() {
	defer func() {
		if err := recover(); err != nil {
			log.Printf("ldap: recovered panic in processMessages: %v", err)
		}
		for messageID, msgCtx := range l.messageContexts {
			// If we are closing due to an error, inform anyone who
			// is waiting about the error.
			if l.isClosing() && l.closeErr.Load() != nil {
				msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)})
			}
			l.Debug.Printf("Closing channel for MessageID %d", messageID)
			close(msgCtx.responses)
			delete(l.messageContexts, messageID)
		}
		// Closing chanMessageID makes nextMessageID() return 0;
		// closing chanConfirm unblocks Close().
		close(l.chanMessageID)
		close(l.chanConfirm)
	}()

	var messageID int64 = 1
	for {
		select {
		case l.chanMessageID <- messageID:
			messageID++
		case message := <-l.chanMessage:
			switch message.Op {
			case MessageQuit:
				l.Debug.Printf("Shutting down - quit message received")
				return
			case MessageRequest:
				// Add to message list and write to network
				l.Debug.Printf("Sending message %d", message.MessageID)

				buf := message.Packet.Bytes()
				_, err := l.conn.Write(buf)
				if err != nil {
					l.Debug.Printf("Error Sending Message: %s", err.Error())
					message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)})
					close(message.Context.responses)
					break
				}

				// Only add to messageContexts if we were able to
				// successfully write the message.
				l.messageContexts[message.MessageID] = message.Context

				// Add timeout if defined
				requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout))
				if requestTimeout > 0 {
					go func() {
						defer func() {
							if err := recover(); err != nil {
								log.Printf("ldap: recovered panic in RequestTimeout: %v", err)
							}
						}()
						time.Sleep(requestTimeout)
						timeoutMessage := &messagePacket{
							Op:        MessageTimeout,
							MessageID: message.MessageID,
						}
						l.sendProcessMessage(timeoutMessage)
					}()
				}
			case MessageResponse:
				l.Debug.Printf("Receiving message %d", message.MessageID)
				if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
					msgCtx.sendResponse(&PacketResponse{message.Packet, nil})
				} else {
					log.Printf("Received unexpected message %d, %v", message.MessageID, l.isClosing())
					ber.PrintPacket(message.Packet)
				}
			case MessageTimeout:
				// Handle the timeout by closing the channel
				// All reads will return immediately
				if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
					l.Debug.Printf("Receiving message timeout for %d", message.MessageID)
					msgCtx.sendResponse(&PacketResponse{message.Packet, errors.New("ldap: connection timed out")})
					delete(l.messageContexts, message.MessageID)
					close(msgCtx.responses)
				}
			case MessageFinish:
				l.Debug.Printf("Finished message %d", message.MessageID)
				if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
					delete(l.messageContexts, message.MessageID)
					close(msgCtx.responses)
				}
			}
		}
	}
}
|
||||
|
||||
// reader pulls BER packets off the network connection and forwards them to
// processMessages(). It exits without tearing the connection down only
// during a StartTLS handshake (cleanstop); any other exit path closes the
// whole connection via the deferred Close().
func (l *Conn) reader() {
	cleanstop := false
	defer func() {
		if err := recover(); err != nil {
			log.Printf("ldap: recovered panic in reader: %v", err)
		}
		if !cleanstop {
			l.Close()
		}
	}()

	for {
		if cleanstop {
			l.Debug.Printf("reader clean stopping (without closing the connection)")
			return
		}
		packet, err := ber.ReadPacket(l.conn)
		if err != nil {
			// A read error is expected here if we are closing the connection...
			if !l.isClosing() {
				l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err))
				l.Debug.Printf("reader error: %s", err.Error())
			}
			return
		}
		addLDAPDescriptions(packet)
		if len(packet.Children) == 0 {
			// Packet without a MessageID child cannot be routed; drop it.
			l.Debug.Printf("Received bad ldap packet")
			continue
		}
		l.messageMutex.Lock()
		if l.isStartingTLS {
			// Stop after delivering this packet; StartTLS restarts the
			// reader on the encrypted connection.
			cleanstop = true
		}
		l.messageMutex.Unlock()
		message := &messagePacket{
			Op:        MessageResponse,
			MessageID: packet.Children[0].Value.(int64),
			Packet:    packet,
		}
		if !l.sendProcessMessage(message) {
			return
		}
	}
}
|
|
@ -0,0 +1,420 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// OIDs for the controls this package can encode/decode.
const (
	// ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt
	ControlTypePaging = "1.2.840.113556.1.4.319"
	// ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
	ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1"
	// ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
	ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4"
	// ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
	ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5"
	// ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296
	ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2"
)

// ControlTypeMap maps controls to text descriptions.
// OIDs absent from this map (including the VChu controls) render as an
// empty description in packet dumps and String() output.
var ControlTypeMap = map[string]string{
	ControlTypePaging:               "Paging",
	ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft",
	ControlTypeManageDsaIT:          "Manage DSA IT",
}
|
||||
|
||||
// Control defines an interface controls provide to encode and describe themselves
|
||||
// Control defines an interface controls provide to encode and describe
// themselves. Implementations in this file cover paging, password-policy
// and ManageDsaIT controls, with ControlString as the generic fallback.
type Control interface {
	// GetControlType returns the OID
	GetControlType() string
	// Encode returns the ber packet representation
	Encode() *ber.Packet
	// String returns a human-readable description
	String() string
}
|
||||
|
||||
// ControlString implements the Control interface for simple controls
|
||||
// ControlString implements the Control interface for simple controls
// carrying a plain string value.
type ControlString struct {
	// ControlType is the OID identifying the control
	ControlType string
	// Criticality indicates if this control is required
	Criticality bool
	// ControlValue holds the control's string payload
	ControlValue string
}

// GetControlType returns the OID
func (c *ControlString) GetControlType() string {
	return c.ControlType
}

// Encode returns the ber packet representation.
// Note: a value child is always appended, even when ControlValue is empty.
func (c *ControlString) Encode() *ber.Packet {
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")"))
	if c.Criticality {
		// Criticality is optional-default-false, so it is only encoded when true.
		packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
	}
	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(c.ControlValue), "Control Value"))
	return packet
}

// String returns a human-readable description
func (c *ControlString) String() string {
	return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue)
}
|
||||
|
||||
// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt
|
||||
// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt
type ControlPaging struct {
	// PagingSize indicates the page size
	PagingSize uint32
	// Cookie is an opaque value returned by the server to track a paging cursor
	Cookie []byte
}

// GetControlType returns the OID
func (c *ControlPaging) GetControlType() string {
	return ControlTypePaging
}

// Encode returns the ber packet representation.
// The value is an octet string wrapping SEQUENCE { size INTEGER, cookie OCTET STRING }
// per RFC 2696.
func (c *ControlPaging) Encode() *ber.Packet {
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")"))

	p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)")
	seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value")
	seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size"))
	cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie")
	// Cookie bytes are written raw; an empty cookie starts a fresh paged search.
	cookie.Value = c.Cookie
	cookie.Data.Write(c.Cookie)
	seq.AppendChild(cookie)
	p2.AppendChild(seq)

	packet.AppendChild(p2)
	return packet
}

// String returns a human-readable description
func (c *ControlPaging) String() string {
	return fmt.Sprintf(
		"Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q",
		ControlTypeMap[ControlTypePaging],
		ControlTypePaging,
		false,
		c.PagingSize,
		c.Cookie)
}

// SetCookie stores the given cookie in the paging control
func (c *ControlPaging) SetCookie(cookie []byte) {
	c.Cookie = cookie
}
|
||||
|
||||
// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
|
||||
// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
type ControlBeheraPasswordPolicy struct {
	// Expire contains the number of seconds before a password will expire
	Expire int64
	// Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password
	Grace int64
	// Error indicates the error code
	Error int8
	// ErrorString is a human readable error
	ErrorString string
}

// GetControlType returns the OID
func (c *ControlBeheraPasswordPolicy) GetControlType() string {
	return ControlTypeBeheraPasswordPolicy
}

// Encode returns the ber packet representation.
// Only the OID is encoded; the request form of this control carries no value.
func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet {
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")"))

	return packet
}

// String returns a human-readable description
func (c *ControlBeheraPasswordPolicy) String() string {
	return fmt.Sprintf(
		"Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s",
		ControlTypeMap[ControlTypeBeheraPasswordPolicy],
		ControlTypeBeheraPasswordPolicy,
		false,
		c.Expire,
		c.Grace,
		c.Error,
		c.ErrorString)
}
|
||||
|
||||
// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
|
||||
// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
type ControlVChuPasswordMustChange struct {
	// MustChange indicates if the password is required to be changed
	MustChange bool
}

// GetControlType returns the OID
func (c *ControlVChuPasswordMustChange) GetControlType() string {
	return ControlTypeVChuPasswordMustChange
}

// Encode returns the ber packet representation.
// This is a response-only control, so there is nothing to encode.
func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet {
	return nil
}

// String returns a human-readable description
func (c *ControlVChuPasswordMustChange) String() string {
	return fmt.Sprintf(
		"Control Type: %s (%q) Criticality: %t MustChange: %v",
		ControlTypeMap[ControlTypeVChuPasswordMustChange],
		ControlTypeVChuPasswordMustChange,
		false,
		c.MustChange)
}
|
||||
|
||||
// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
|
||||
type ControlVChuPasswordWarning struct {
|
||||
// Expire indicates the time in seconds until the password expires
|
||||
Expire int64
|
||||
}
|
||||
|
||||
// GetControlType returns the OID
|
||||
func (c *ControlVChuPasswordWarning) GetControlType() string {
|
||||
return ControlTypeVChuPasswordWarning
|
||||
}
|
||||
|
||||
// Encode returns the ber packet representation
|
||||
func (c *ControlVChuPasswordWarning) Encode() *ber.Packet {
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns a human-readable description
|
||||
func (c *ControlVChuPasswordWarning) String() string {
|
||||
return fmt.Sprintf(
|
||||
"Control Type: %s (%q) Criticality: %t Expire: %b",
|
||||
ControlTypeMap[ControlTypeVChuPasswordWarning],
|
||||
ControlTypeVChuPasswordWarning,
|
||||
false,
|
||||
c.Expire)
|
||||
}
|
||||
|
||||
// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296
|
||||
// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296
type ControlManageDsaIT struct {
	// Criticality indicates if this control is required
	Criticality bool
}

// GetControlType returns the OID
func (c *ControlManageDsaIT) GetControlType() string {
	return ControlTypeManageDsaIT
}

// Encode returns the ber packet representation.
// The control carries no value; only the OID and optional criticality
// are emitted.
func (c *ControlManageDsaIT) Encode() *ber.Packet {
	//FIXME
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")"))
	if c.Criticality {
		packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
	}
	return packet
}

// String returns a human-readable description
func (c *ControlManageDsaIT) String() string {
	return fmt.Sprintf(
		"Control Type: %s (%q) Criticality: %t",
		ControlTypeMap[ControlTypeManageDsaIT],
		ControlTypeManageDsaIT,
		c.Criticality)
}

// NewControlManageDsaIT returns a ControlManageDsaIT control
func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT {
	return &ControlManageDsaIT{Criticality: Criticality}
}
|
||||
|
||||
// FindControl returns the first control of the given type in the list, or nil
|
||||
func FindControl(controls []Control, controlType string) Control {
|
||||
for _, c := range controls {
|
||||
if c.GetControlType() == controlType {
|
||||
return c
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made
|
||||
func DecodeControl(packet *ber.Packet) Control {
|
||||
var (
|
||||
ControlType = ""
|
||||
Criticality = false
|
||||
value *ber.Packet
|
||||
)
|
||||
|
||||
switch len(packet.Children) {
|
||||
case 0:
|
||||
// at least one child is required for control type
|
||||
return nil
|
||||
|
||||
case 1:
|
||||
// just type, no criticality or value
|
||||
packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
|
||||
ControlType = packet.Children[0].Value.(string)
|
||||
|
||||
case 2:
|
||||
packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
|
||||
ControlType = packet.Children[0].Value.(string)
|
||||
|
||||
// Children[1] could be criticality or value (both are optional)
|
||||
// duck-type on whether this is a boolean
|
||||
if _, ok := packet.Children[1].Value.(bool); ok {
|
||||
packet.Children[1].Description = "Criticality"
|
||||
Criticality = packet.Children[1].Value.(bool)
|
||||
} else {
|
||||
packet.Children[1].Description = "Control Value"
|
||||
value = packet.Children[1]
|
||||
}
|
||||
|
||||
case 3:
|
||||
packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
|
||||
ControlType = packet.Children[0].Value.(string)
|
||||
|
||||
packet.Children[1].Description = "Criticality"
|
||||
Criticality = packet.Children[1].Value.(bool)
|
||||
|
||||
packet.Children[2].Description = "Control Value"
|
||||
value = packet.Children[2]
|
||||
|
||||
default:
|
||||
// more than 3 children is invalid
|
||||
return nil
|
||||
}
|
||||
|
||||
switch ControlType {
|
||||
case ControlTypeManageDsaIT:
|
||||
return NewControlManageDsaIT(Criticality)
|
||||
case ControlTypePaging:
|
||||
value.Description += " (Paging)"
|
||||
c := new(ControlPaging)
|
||||
if value.Value != nil {
|
||||
valueChildren := ber.DecodePacket(value.Data.Bytes())
|
||||
value.Data.Truncate(0)
|
||||
value.Value = nil
|
||||
value.AppendChild(valueChildren)
|
||||
}
|
||||
value = value.Children[0]
|
||||
value.Description = "Search Control Value"
|
||||
value.Children[0].Description = "Paging Size"
|
||||
value.Children[1].Description = "Cookie"
|
||||
c.PagingSize = uint32(value.Children[0].Value.(int64))
|
||||
c.Cookie = value.Children[1].Data.Bytes()
|
||||
value.Children[1].Value = c.Cookie
|
||||
return c
|
||||
case ControlTypeBeheraPasswordPolicy:
|
||||
value.Description += " (Password Policy - Behera)"
|
||||
c := NewControlBeheraPasswordPolicy()
|
||||
if value.Value != nil {
|
||||
valueChildren := ber.DecodePacket(value.Data.Bytes())
|
||||
value.Data.Truncate(0)
|
||||
value.Value = nil
|
||||
value.AppendChild(valueChildren)
|
||||
}
|
||||
|
||||
sequence := value.Children[0]
|
||||
|
||||
for _, child := range sequence.Children {
|
||||
if child.Tag == 0 {
|
||||
//Warning
|
||||
warningPacket := child.Children[0]
|
||||
packet := ber.DecodePacket(warningPacket.Data.Bytes())
|
||||
val, ok := packet.Value.(int64)
|
||||
if ok {
|
||||
if warningPacket.Tag == 0 {
|
||||
//timeBeforeExpiration
|
||||
c.Expire = val
|
||||
warningPacket.Value = c.Expire
|
||||
} else if warningPacket.Tag == 1 {
|
||||
//graceAuthNsRemaining
|
||||
c.Grace = val
|
||||
warningPacket.Value = c.Grace
|
||||
}
|
||||
}
|
||||
} else if child.Tag == 1 {
|
||||
// Error
|
||||
packet := ber.DecodePacket(child.Data.Bytes())
|
||||
val, ok := packet.Value.(int8)
|
||||
if !ok {
|
||||
// what to do?
|
||||
val = -1
|
||||
}
|
||||
c.Error = val
|
||||
child.Value = c.Error
|
||||
c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error]
|
||||
}
|
||||
}
|
||||
return c
|
||||
case ControlTypeVChuPasswordMustChange:
|
||||
c := &ControlVChuPasswordMustChange{MustChange: true}
|
||||
return c
|
||||
case ControlTypeVChuPasswordWarning:
|
||||
c := &ControlVChuPasswordWarning{Expire: -1}
|
||||
expireStr := ber.DecodeString(value.Data.Bytes())
|
||||
|
||||
expire, err := strconv.ParseInt(expireStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
c.Expire = expire
|
||||
value.Value = c.Expire
|
||||
|
||||
return c
|
||||
default:
|
||||
c := new(ControlString)
|
||||
c.ControlType = ControlType
|
||||
c.Criticality = Criticality
|
||||
if value != nil {
|
||||
c.ControlValue = value.Value.(string)
|
||||
}
|
||||
return c
|
||||
}
|
||||
}
|
||||
|
||||
// NewControlString returns a generic control
|
||||
func NewControlString(controlType string, criticality bool, controlValue string) *ControlString {
|
||||
return &ControlString{
|
||||
ControlType: controlType,
|
||||
Criticality: criticality,
|
||||
ControlValue: controlValue,
|
||||
}
|
||||
}
|
||||
|
||||
// NewControlPaging returns a paging control
|
||||
func NewControlPaging(pagingSize uint32) *ControlPaging {
|
||||
return &ControlPaging{PagingSize: pagingSize}
|
||||
}
|
||||
|
||||
// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy
|
||||
func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy {
|
||||
return &ControlBeheraPasswordPolicy{
|
||||
Expire: -1,
|
||||
Grace: -1,
|
||||
Error: -1,
|
||||
}
|
||||
}
|
||||
|
||||
func encodeControls(controls []Control) *ber.Packet {
|
||||
packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls")
|
||||
for _, control := range controls {
|
||||
packet.AppendChild(control.Encode())
|
||||
}
|
||||
return packet
|
||||
}
|
|
@ -0,0 +1,24 @@
|
|||
package ldap
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// debugging type
|
||||
// - has a Printf method to write the debug output
|
||||
type debugging bool
|
||||
|
||||
// write debug output
|
||||
func (debug debugging) Printf(format string, args ...interface{}) {
|
||||
if debug {
|
||||
log.Printf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (debug debugging) PrintPacket(packet *ber.Packet) {
|
||||
if debug {
|
||||
ber.PrintPacket(packet)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,84 @@
|
|||
//
|
||||
// https://tools.ietf.org/html/rfc4511
|
||||
//
|
||||
// DelRequest ::= [APPLICATION 10] LDAPDN
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// DelRequest implements an LDAP deletion request as defined in
// https://tools.ietf.org/html/rfc4511#section-4.8
// (DelRequest ::= [APPLICATION 10] LDAPDN).
type DelRequest struct {
	// DN is the name of the directory entry to delete
	DN string
	// Controls hold optional controls to send with the request
	Controls []Control
}
|
||||
|
||||
// encode serialises the delete request as a BER packet. A DelRequest is an
// [APPLICATION 10] LDAPDN, so the DN is carried directly as the primitive
// value of the application-tagged packet.
func (d DelRequest) encode() *ber.Packet {
	request := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, d.DN, "Del Request")
	// The DN bytes are written into the packet's data buffer explicitly in
	// addition to being passed as the Value above.
	// NOTE(review): verify against the asn1-ber package that this does not
	// double-encode the DN when the packet is marshalled.
	request.Data.Write([]byte(d.DN))
	return request
}
|
||||
|
||||
// NewDelRequest creates a delete request for the given DN and controls
|
||||
func NewDelRequest(DN string,
|
||||
Controls []Control) *DelRequest {
|
||||
return &DelRequest{
|
||||
DN: DN,
|
||||
Controls: Controls,
|
||||
}
|
||||
}
|
||||
|
||||
// Del executes the given delete request synchronously: it sends the request
// on the connection, waits for the server's response, and maps a non-zero
// LDAP result code to an error. Returns nil on success.
func (l *Conn) Del(delRequest *DelRequest) error {
	// Build the LDAPMessage envelope: MessageID, the delete op, and any controls.
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
	packet.AppendChild(delRequest.encode())
	if delRequest.Controls != nil {
		packet.AppendChild(encodeControls(delRequest.Controls))
	}

	l.Debug.PrintPacket(packet)

	msgCtx, err := l.sendMessage(packet)
	if err != nil {
		return err
	}
	// Always release the in-flight message slot, whatever path we exit on.
	defer l.finishMessage(msgCtx)

	l.Debug.Printf("%d: waiting for response", msgCtx.id)
	// A closed channel means the connection was torn down before a reply arrived.
	packetResponse, ok := <-msgCtx.responses
	if !ok {
		return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
	}
	packet, err = packetResponse.ReadPacket()
	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
	if err != nil {
		return err
	}

	if l.Debug {
		if err := addLDAPDescriptions(packet); err != nil {
			return err
		}
		ber.PrintPacket(packet)
	}

	// Children[1] is the protocol op; a DelResponse carries the result code.
	if packet.Children[1].Tag == ApplicationDelResponse {
		resultCode, resultDescription := getLDAPResultCode(packet)
		if resultCode != 0 {
			return NewError(resultCode, errors.New(resultDescription))
		}
	} else {
		// Unexpected op type is logged but deliberately not treated as an error.
		log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
	}

	l.Debug.Printf("%d: returning", msgCtx.id)
	return nil
}
|
|
@ -0,0 +1,247 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
//
|
||||
// File contains DN parsing functionality
|
||||
//
|
||||
// https://tools.ietf.org/html/rfc4514
|
||||
//
|
||||
// distinguishedName = [ relativeDistinguishedName
|
||||
// *( COMMA relativeDistinguishedName ) ]
|
||||
// relativeDistinguishedName = attributeTypeAndValue
|
||||
// *( PLUS attributeTypeAndValue )
|
||||
// attributeTypeAndValue = attributeType EQUALS attributeValue
|
||||
// attributeType = descr / numericoid
|
||||
// attributeValue = string / hexstring
|
||||
//
|
||||
// ; The following characters are to be escaped when they appear
|
||||
// ; in the value to be encoded: ESC, one of <escaped>, leading
|
||||
// ; SHARP or SPACE, trailing SPACE, and NULL.
|
||||
// string = [ ( leadchar / pair ) [ *( stringchar / pair )
|
||||
// ( trailchar / pair ) ] ]
|
||||
//
|
||||
// leadchar = LUTF1 / UTFMB
|
||||
// LUTF1 = %x01-1F / %x21 / %x24-2A / %x2D-3A /
|
||||
// %x3D / %x3F-5B / %x5D-7F
|
||||
//
|
||||
// trailchar = TUTF1 / UTFMB
|
||||
// TUTF1 = %x01-1F / %x21 / %x23-2A / %x2D-3A /
|
||||
// %x3D / %x3F-5B / %x5D-7F
|
||||
//
|
||||
// stringchar = SUTF1 / UTFMB
|
||||
// SUTF1 = %x01-21 / %x23-2A / %x2D-3A /
|
||||
// %x3D / %x3F-5B / %x5D-7F
|
||||
//
|
||||
// pair = ESC ( ESC / special / hexpair )
|
||||
// special = escaped / SPACE / SHARP / EQUALS
|
||||
// escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE
|
||||
// hexstring = SHARP 1*hexpair
|
||||
// hexpair = HEX HEX
|
||||
//
|
||||
// where the productions <descr>, <numericoid>, <COMMA>, <DQUOTE>,
|
||||
// <EQUALS>, <ESC>, <HEX>, <LANGLE>, <NULL>, <PLUS>, <RANGLE>, <SEMI>,
|
||||
// <SPACE>, <SHARP>, and <UTFMB> are defined in [RFC4512].
|
||||
//
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
enchex "encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514,
// i.e. one "type=value" pair inside a relative distinguished name.
type AttributeTypeAndValue struct {
	// Type is the attribute type
	Type string
	// Value is the attribute value
	Value string
}

// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514:
// one or more attribute type/value pairs joined by '+'.
type RelativeDN struct {
	Attributes []*AttributeTypeAndValue
}

// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514:
// a sequence of relative DNs joined by ','.
type DN struct {
	RDNs []*RelativeDN
}
|
||||
|
||||
// ParseDN parses an RFC 4514 string representation of a distinguished name
// and returns the structured DN, or an error if the string is malformed
// (corrupt escapes, bad hex, or an incomplete type=value pair).
func ParseDN(str string) (*DN, error) {
	dn := new(DN)
	dn.RDNs = make([]*RelativeDN, 0)
	rdn := new(RelativeDN)
	rdn.Attributes = make([]*AttributeTypeAndValue, 0)
	buffer := bytes.Buffer{}
	attribute := new(AttributeTypeAndValue)
	escaping := false

	// Unescaped trailing spaces must be stripped from a value (RFC 4514);
	// the counter tracks how many the buffer currently ends with.
	unescapedTrailingSpaces := 0
	// stringFromBuffer drains the accumulator, dropping trailing unescaped spaces.
	stringFromBuffer := func() string {
		s := buffer.String()
		s = s[0 : len(s)-unescapedTrailingSpaces]
		buffer.Reset()
		unescapedTrailingSpaces = 0
		return s
	}

	// Byte-wise scan; i is advanced manually for multi-byte escapes and
	// BER-encoded (#...) values.
	for i := 0; i < len(str); i++ {
		char := str[i]
		if escaping {
			unescapedTrailingSpaces = 0
			escaping = false
			switch char {
			case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\':
				buffer.WriteByte(char)
				continue
			}
			// Not a special character, assume hex encoded octet
			if len(str) == i+1 {
				return nil, errors.New("Got corrupted escaped character")
			}

			dst := []byte{0}
			n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2]))
			if err != nil {
				return nil, fmt.Errorf("Failed to decode escaped character: %s", err)
			} else if n != 1 {
				return nil, fmt.Errorf("Expected 1 byte when un-escaping, got %d", n)
			}
			buffer.WriteByte(dst[0])
			// Skip the second hex digit; the loop increment consumes the first.
			i++
		} else if char == '\\' {
			unescapedTrailingSpaces = 0
			escaping = true
		} else if char == '=' {
			attribute.Type = stringFromBuffer()
			// Special case: If the first character in the value is # the
			// following data is BER encoded so we can just fast forward
			// and decode.
			if len(str) > i+1 && str[i+1] == '#' {
				i += 2
				index := strings.IndexAny(str[i:], ",+")
				data := str
				if index > 0 {
					data = str[i : i+index]
				} else {
					data = str[i:]
				}
				rawBER, err := enchex.DecodeString(data)
				if err != nil {
					return nil, fmt.Errorf("Failed to decode BER encoding: %s", err)
				}
				packet := ber.DecodePacket(rawBER)
				buffer.WriteString(packet.Data.String())
				i += len(data) - 1
			}
		} else if char == ',' || char == '+' {
			// We're done with this RDN or value, push it
			if len(attribute.Type) == 0 {
				return nil, errors.New("incomplete type, value pair")
			}
			attribute.Value = stringFromBuffer()
			rdn.Attributes = append(rdn.Attributes, attribute)
			attribute = new(AttributeTypeAndValue)
			// ',' ends the whole RDN; '+' only ends one pair within it.
			if char == ',' {
				dn.RDNs = append(dn.RDNs, rdn)
				rdn = new(RelativeDN)
				rdn.Attributes = make([]*AttributeTypeAndValue, 0)
			}
		} else if char == ' ' && buffer.Len() == 0 {
			// ignore unescaped leading spaces
			continue
		} else {
			if char == ' ' {
				// Track unescaped spaces in case they are trailing and we need to remove them
				unescapedTrailingSpaces++
			} else {
				// Reset if we see a non-space char
				unescapedTrailingSpaces = 0
			}
			buffer.WriteByte(char)
		}
	}
	// Flush the final pending type=value pair, if any.
	if buffer.Len() > 0 {
		if len(attribute.Type) == 0 {
			return nil, errors.New("DN ended with incomplete type, value pair")
		}
		attribute.Value = stringFromBuffer()
		rdn.Attributes = append(rdn.Attributes, attribute)
		dn.RDNs = append(dn.RDNs, rdn)
	}
	return dn, nil
}
|
||||
|
||||
// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
|
||||
// Returns true if they have the same number of relative distinguished names
|
||||
// and corresponding relative distinguished names (by position) are the same.
|
||||
func (d *DN) Equal(other *DN) bool {
|
||||
if len(d.RDNs) != len(other.RDNs) {
|
||||
return false
|
||||
}
|
||||
for i := range d.RDNs {
|
||||
if !d.RDNs[i].Equal(other.RDNs[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
|
||||
// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com"
|
||||
// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com"
|
||||
// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com"
|
||||
func (d *DN) AncestorOf(other *DN) bool {
|
||||
if len(d.RDNs) >= len(other.RDNs) {
|
||||
return false
|
||||
}
|
||||
// Take the last `len(d.RDNs)` RDNs from the other DN to compare against
|
||||
otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
|
||||
for i := range d.RDNs {
|
||||
if !d.RDNs[i].Equal(otherRDNs[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
|
||||
// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues
|
||||
// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type.
|
||||
// The order of attributes is not significant.
|
||||
// Case of attribute types is not significant.
|
||||
func (r *RelativeDN) Equal(other *RelativeDN) bool {
|
||||
if len(r.Attributes) != len(other.Attributes) {
|
||||
return false
|
||||
}
|
||||
return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes)
|
||||
}
|
||||
|
||||
func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool {
|
||||
for _, attr := range attrs {
|
||||
found := false
|
||||
for _, myattr := range r.Attributes {
|
||||
if myattr.Equal(attr) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
|
||||
// Case of the attribute type is not significant
|
||||
func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool {
|
||||
return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value
|
||||
}
|
|
@ -0,0 +1,4 @@
|
|||
/*
|
||||
Package ldap provides basic LDAP v3 functionality.
|
||||
*/
|
||||
package ldap
|
|
@ -0,0 +1,155 @@
|
|||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// LDAP Result Codes as defined in RFC 4511 appendix A, plus
// library-internal client-side error codes (200+).
const (
	LDAPResultSuccess                      = 0
	LDAPResultOperationsError              = 1
	LDAPResultProtocolError                = 2
	LDAPResultTimeLimitExceeded            = 3
	LDAPResultSizeLimitExceeded            = 4
	LDAPResultCompareFalse                 = 5
	LDAPResultCompareTrue                  = 6
	LDAPResultAuthMethodNotSupported       = 7
	LDAPResultStrongAuthRequired           = 8
	LDAPResultReferral                     = 10
	LDAPResultAdminLimitExceeded           = 11
	LDAPResultUnavailableCriticalExtension = 12
	LDAPResultConfidentialityRequired      = 13
	LDAPResultSaslBindInProgress           = 14
	LDAPResultNoSuchAttribute              = 16
	LDAPResultUndefinedAttributeType       = 17
	LDAPResultInappropriateMatching        = 18
	LDAPResultConstraintViolation          = 19
	LDAPResultAttributeOrValueExists       = 20
	LDAPResultInvalidAttributeSyntax       = 21
	LDAPResultNoSuchObject                 = 32
	LDAPResultAliasProblem                 = 33
	LDAPResultInvalidDNSyntax              = 34
	LDAPResultAliasDereferencingProblem    = 36
	LDAPResultInappropriateAuthentication  = 48
	LDAPResultInvalidCredentials           = 49
	LDAPResultInsufficientAccessRights     = 50
	LDAPResultBusy                         = 51
	LDAPResultUnavailable                  = 52
	LDAPResultUnwillingToPerform           = 53
	LDAPResultLoopDetect                   = 54
	LDAPResultNamingViolation              = 64
	LDAPResultObjectClassViolation         = 65
	LDAPResultNotAllowedOnNonLeaf          = 66
	LDAPResultNotAllowedOnRDN              = 67
	LDAPResultEntryAlreadyExists           = 68
	LDAPResultObjectClassModsProhibited    = 69
	LDAPResultAffectsMultipleDSAs          = 71
	LDAPResultOther                        = 80

	// Client-side (non-protocol) error codes used by this library.
	ErrorNetwork            = 200
	ErrorFilterCompile      = 201
	ErrorFilterDecompile    = 202
	ErrorDebugging          = 203
	ErrorUnexpectedMessage  = 204
	ErrorUnexpectedResponse = 205
)

// LDAPResultCodeMap contains string descriptions for LDAP error codes
var LDAPResultCodeMap = map[uint8]string{
	LDAPResultSuccess:                      "Success",
	LDAPResultOperationsError:              "Operations Error",
	LDAPResultProtocolError:                "Protocol Error",
	LDAPResultTimeLimitExceeded:            "Time Limit Exceeded",
	LDAPResultSizeLimitExceeded:            "Size Limit Exceeded",
	LDAPResultCompareFalse:                 "Compare False",
	LDAPResultCompareTrue:                  "Compare True",
	LDAPResultAuthMethodNotSupported:       "Auth Method Not Supported",
	LDAPResultStrongAuthRequired:           "Strong Auth Required",
	LDAPResultReferral:                     "Referral",
	LDAPResultAdminLimitExceeded:           "Admin Limit Exceeded",
	LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension",
	LDAPResultConfidentialityRequired:      "Confidentiality Required",
	LDAPResultSaslBindInProgress:           "Sasl Bind In Progress",
	LDAPResultNoSuchAttribute:              "No Such Attribute",
	LDAPResultUndefinedAttributeType:       "Undefined Attribute Type",
	LDAPResultInappropriateMatching:        "Inappropriate Matching",
	LDAPResultConstraintViolation:          "Constraint Violation",
	LDAPResultAttributeOrValueExists:       "Attribute Or Value Exists",
	LDAPResultInvalidAttributeSyntax:       "Invalid Attribute Syntax",
	LDAPResultNoSuchObject:                 "No Such Object",
	LDAPResultAliasProblem:                 "Alias Problem",
	LDAPResultInvalidDNSyntax:              "Invalid DN Syntax",
	LDAPResultAliasDereferencingProblem:    "Alias Dereferencing Problem",
	LDAPResultInappropriateAuthentication:  "Inappropriate Authentication",
	LDAPResultInvalidCredentials:           "Invalid Credentials",
	LDAPResultInsufficientAccessRights:     "Insufficient Access Rights",
	LDAPResultBusy:                         "Busy",
	LDAPResultUnavailable:                  "Unavailable",
	LDAPResultUnwillingToPerform:           "Unwilling To Perform",
	LDAPResultLoopDetect:                   "Loop Detect",
	LDAPResultNamingViolation:              "Naming Violation",
	LDAPResultObjectClassViolation:         "Object Class Violation",
	LDAPResultNotAllowedOnNonLeaf:          "Not Allowed On Non Leaf",
	LDAPResultNotAllowedOnRDN:              "Not Allowed On RDN",
	LDAPResultEntryAlreadyExists:           "Entry Already Exists",
	LDAPResultObjectClassModsProhibited:    "Object Class Mods Prohibited",
	LDAPResultAffectsMultipleDSAs:          "Affects Multiple DSAs",
	LDAPResultOther:                        "Other",

	ErrorNetwork:            "Network Error",
	ErrorFilterCompile:      "Filter Compile Error",
	ErrorFilterDecompile:    "Filter Decompile Error",
	ErrorDebugging:          "Debugging Error",
	ErrorUnexpectedMessage:  "Unexpected Message",
	ErrorUnexpectedResponse: "Unexpected Response",
}
|
||||
|
||||
func getLDAPResultCode(packet *ber.Packet) (code uint8, description string) {
|
||||
if packet == nil {
|
||||
return ErrorUnexpectedResponse, "Empty packet"
|
||||
} else if len(packet.Children) >= 2 {
|
||||
response := packet.Children[1]
|
||||
if response == nil {
|
||||
return ErrorUnexpectedResponse, "Empty response in packet"
|
||||
}
|
||||
if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 {
|
||||
// Children[1].Children[2] is the diagnosticMessage which is guaranteed to exist as seen here: https://tools.ietf.org/html/rfc4511#section-4.1.9
|
||||
return uint8(response.Children[0].Value.(int64)), response.Children[2].Value.(string)
|
||||
}
|
||||
}
|
||||
|
||||
return ErrorNetwork, "Invalid packet format"
|
||||
}
|
||||
|
||||
// Error holds LDAP error information: the LDAP result code (or one of this
// library's client-side Error* codes) together with the underlying error.
type Error struct {
	// Err is the underlying error
	Err error
	// ResultCode is the LDAP error code
	ResultCode uint8
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
|
||||
}
|
||||
|
||||
// NewError creates an LDAP error with the given code and underlying error
|
||||
func NewError(resultCode uint8, err error) error {
|
||||
return &Error{ResultCode: resultCode, Err: err}
|
||||
}
|
||||
|
||||
// IsErrorWithCode returns true if the given error is an LDAP error with the given result code
|
||||
func IsErrorWithCode(err error, desiredResultCode uint8) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
serverError, ok := err.(*Error)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return serverError.ResultCode == desiredResultCode
|
||||
}
|
|
@ -0,0 +1,469 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
hexpac "encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// Filter choices: the CHOICE tags of the Filter type in RFC 4511 section 4.5.1.
const (
	FilterAnd             = 0
	FilterOr              = 1
	FilterNot             = 2
	FilterEqualityMatch   = 3
	FilterSubstrings      = 4
	FilterGreaterOrEqual  = 5
	FilterLessOrEqual     = 6
	FilterPresent         = 7
	FilterApproxMatch     = 8
	FilterExtensibleMatch = 9
)

// FilterMap contains human readable descriptions of Filter choices
var FilterMap = map[uint64]string{
	FilterAnd:             "And",
	FilterOr:              "Or",
	FilterNot:             "Not",
	FilterEqualityMatch:   "Equality Match",
	FilterSubstrings:      "Substrings",
	FilterGreaterOrEqual:  "Greater Or Equal",
	FilterLessOrEqual:     "Less Or Equal",
	FilterPresent:         "Present",
	FilterApproxMatch:     "Approx Match",
	FilterExtensibleMatch: "Extensible Match",
}

// SubstringFilter options: the tags of the substrings SEQUENCE elements
// (initial / any / final) in RFC 4511.
const (
	FilterSubstringsInitial = 0
	FilterSubstringsAny     = 1
	FilterSubstringsFinal   = 2
)

// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices
var FilterSubstringsMap = map[uint64]string{
	FilterSubstringsInitial: "Substrings Initial",
	FilterSubstringsAny:     "Substrings Any",
	FilterSubstringsFinal:   "Substrings Final",
}

// MatchingRuleAssertion choices: the context tags of the
// MatchingRuleAssertion SEQUENCE in RFC 4511.
const (
	MatchingRuleAssertionMatchingRule = 1
	MatchingRuleAssertionType         = 2
	MatchingRuleAssertionMatchValue   = 3
	MatchingRuleAssertionDNAttributes = 4
)

// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices
var MatchingRuleAssertionMap = map[uint64]string{
	MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule",
	MatchingRuleAssertionType:         "Matching Rule Assertion Type",
	MatchingRuleAssertionMatchValue:   "Matching Rule Assertion Match Value",
	MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes",
}
|
||||
|
||||
// CompileFilter converts a string representation of a filter into a BER-encoded packet
|
||||
func CompileFilter(filter string) (*ber.Packet, error) {
|
||||
if len(filter) == 0 || filter[0] != '(' {
|
||||
return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('"))
|
||||
}
|
||||
packet, pos, err := compileFilter(filter, 1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch {
|
||||
case pos > len(filter):
|
||||
return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
|
||||
case pos < len(filter):
|
||||
return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:])))
|
||||
}
|
||||
return packet, nil
|
||||
}
|
||||
|
||||
// DecompileFilter converts a BER packet representation of a filter back into
// its RFC 4515 string representation. Malformed packets are reported as an
// ErrorFilterDecompile error rather than a panic.
func DecompileFilter(packet *ber.Packet) (ret string, err error) {
	// Child-index accesses below may panic on malformed packets; convert any
	// panic into a decompile error.
	defer func() {
		if r := recover(); r != nil {
			err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter"))
		}
	}()
	ret = "("
	err = nil
	childStr := ""

	switch packet.Tag {
	case FilterAnd:
		ret += "&"
		for _, child := range packet.Children {
			childStr, err = DecompileFilter(child)
			if err != nil {
				return
			}
			ret += childStr
		}
	case FilterOr:
		ret += "|"
		for _, child := range packet.Children {
			childStr, err = DecompileFilter(child)
			if err != nil {
				return
			}
			ret += childStr
		}
	case FilterNot:
		ret += "!"
		childStr, err = DecompileFilter(packet.Children[0])
		if err != nil {
			return
		}
		ret += childStr

	case FilterSubstrings:
		ret += ber.DecodeString(packet.Children[0].Data.Bytes())
		ret += "="
		// Children[1] holds the substring parts; '*' separates them and
		// appears at the ends unless an initial/final part is present.
		for i, child := range packet.Children[1].Children {
			if i == 0 && child.Tag != FilterSubstringsInitial {
				ret += "*"
			}
			ret += EscapeFilter(ber.DecodeString(child.Data.Bytes()))
			if child.Tag != FilterSubstringsFinal {
				ret += "*"
			}
		}
	case FilterEqualityMatch:
		ret += ber.DecodeString(packet.Children[0].Data.Bytes())
		ret += "="
		ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
	case FilterGreaterOrEqual:
		ret += ber.DecodeString(packet.Children[0].Data.Bytes())
		ret += ">="
		ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
	case FilterLessOrEqual:
		ret += ber.DecodeString(packet.Children[0].Data.Bytes())
		ret += "<="
		ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
	case FilterPresent:
		ret += ber.DecodeString(packet.Data.Bytes())
		ret += "=*"
	case FilterApproxMatch:
		ret += ber.DecodeString(packet.Children[0].Data.Bytes())
		ret += "~="
		ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
	case FilterExtensibleMatch:
		attr := ""
		dnAttributes := false
		matchingRule := ""
		value := ""

		// Collect the optional MatchingRuleAssertion components by tag.
		for _, child := range packet.Children {
			switch child.Tag {
			case MatchingRuleAssertionMatchingRule:
				matchingRule = ber.DecodeString(child.Data.Bytes())
			case MatchingRuleAssertionType:
				attr = ber.DecodeString(child.Data.Bytes())
			case MatchingRuleAssertionMatchValue:
				value = ber.DecodeString(child.Data.Bytes())
			case MatchingRuleAssertionDNAttributes:
				dnAttributes = child.Value.(bool)
			}
		}

		// Reassemble as attr[:dn][:rule]:=value per RFC 4515 section 3.
		if len(attr) > 0 {
			ret += attr
		}
		if dnAttributes {
			ret += ":dn"
		}
		if len(matchingRule) > 0 {
			ret += ":"
			ret += matchingRule
		}
		ret += ":="
		ret += EscapeFilter(value)
	}

	ret += ")"
	return
}
|
||||
|
||||
func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) {
|
||||
for pos < len(filter) && filter[pos] == '(' {
|
||||
child, newPos, err := compileFilter(filter, pos+1)
|
||||
if err != nil {
|
||||
return pos, err
|
||||
}
|
||||
pos = newPos
|
||||
parent.AppendChild(child)
|
||||
}
|
||||
if pos == len(filter) {
|
||||
return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
|
||||
}
|
||||
|
||||
return pos + 1, nil
|
||||
}
|
||||
|
||||
func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
|
||||
var (
|
||||
packet *ber.Packet
|
||||
err error
|
||||
)
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter"))
|
||||
}
|
||||
}()
|
||||
newPos := pos
|
||||
|
||||
currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:])
|
||||
|
||||
switch currentRune {
|
||||
case utf8.RuneError:
|
||||
return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
|
||||
case '(':
|
||||
packet, newPos, err = compileFilter(filter, pos+currentWidth)
|
||||
newPos++
|
||||
return packet, newPos, err
|
||||
case '&':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd])
|
||||
newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
|
||||
return packet, newPos, err
|
||||
case '|':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr])
|
||||
newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
|
||||
return packet, newPos, err
|
||||
case '!':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot])
|
||||
var child *ber.Packet
|
||||
child, newPos, err = compileFilter(filter, pos+currentWidth)
|
||||
packet.AppendChild(child)
|
||||
return packet, newPos, err
|
||||
default:
|
||||
const (
|
||||
stateReadingAttr = 0
|
||||
stateReadingExtensibleMatchingRule = 1
|
||||
stateReadingCondition = 2
|
||||
)
|
||||
|
||||
state := stateReadingAttr
|
||||
|
||||
attribute := ""
|
||||
extensibleDNAttributes := false
|
||||
extensibleMatchingRule := ""
|
||||
condition := ""
|
||||
|
||||
for newPos < len(filter) {
|
||||
remainingFilter := filter[newPos:]
|
||||
currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter)
|
||||
if currentRune == ')' {
|
||||
break
|
||||
}
|
||||
if currentRune == utf8.RuneError {
|
||||
return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
|
||||
}
|
||||
|
||||
switch state {
|
||||
case stateReadingAttr:
|
||||
switch {
|
||||
// Extensible rule, with only DN-matching
|
||||
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
|
||||
extensibleDNAttributes = true
|
||||
state = stateReadingCondition
|
||||
newPos += 5
|
||||
|
||||
// Extensible rule, with DN-matching and a matching OID
|
||||
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
|
||||
extensibleDNAttributes = true
|
||||
state = stateReadingExtensibleMatchingRule
|
||||
newPos += 4
|
||||
|
||||
// Extensible rule, with attr only
|
||||
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Extensible rule, with no DN attribute matching
|
||||
case currentRune == ':':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
|
||||
state = stateReadingExtensibleMatchingRule
|
||||
newPos++
|
||||
|
||||
// Equality condition
|
||||
case currentRune == '=':
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch])
|
||||
state = stateReadingCondition
|
||||
newPos++
|
||||
|
||||
// Greater-than or equal
|
||||
case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual])
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Less-than or equal
|
||||
case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual])
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Approx
|
||||
case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="):
|
||||
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch])
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Still reading the attribute name
|
||||
default:
|
||||
attribute += fmt.Sprintf("%c", currentRune)
|
||||
newPos += currentWidth
|
||||
}
|
||||
|
||||
case stateReadingExtensibleMatchingRule:
|
||||
switch {
|
||||
|
||||
// Matching rule OID is done
|
||||
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
|
||||
state = stateReadingCondition
|
||||
newPos += 2
|
||||
|
||||
// Still reading the matching rule oid
|
||||
default:
|
||||
extensibleMatchingRule += fmt.Sprintf("%c", currentRune)
|
||||
newPos += currentWidth
|
||||
}
|
||||
|
||||
case stateReadingCondition:
|
||||
// append to the condition
|
||||
condition += fmt.Sprintf("%c", currentRune)
|
||||
newPos += currentWidth
|
||||
}
|
||||
}
|
||||
|
||||
if newPos == len(filter) {
|
||||
err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
|
||||
return packet, newPos, err
|
||||
}
|
||||
if packet == nil {
|
||||
err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter"))
|
||||
return packet, newPos, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case packet.Tag == FilterExtensibleMatch:
|
||||
// MatchingRuleAssertion ::= SEQUENCE {
|
||||
// matchingRule [1] MatchingRuleID OPTIONAL,
|
||||
// type [2] AttributeDescription OPTIONAL,
|
||||
// matchValue [3] AssertionValue,
|
||||
// dnAttributes [4] BOOLEAN DEFAULT FALSE
|
||||
// }
|
||||
|
||||
// Include the matching rule oid, if specified
|
||||
if len(extensibleMatchingRule) > 0 {
|
||||
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule, MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule]))
|
||||
}
|
||||
|
||||
// Include the attribute, if specified
|
||||
if len(attribute) > 0 {
|
||||
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute, MatchingRuleAssertionMap[MatchingRuleAssertionType]))
|
||||
}
|
||||
|
||||
// Add the value (only required child)
|
||||
encodedString, encodeErr := escapedStringToEncodedBytes(condition)
|
||||
if encodeErr != nil {
|
||||
return packet, newPos, encodeErr
|
||||
}
|
||||
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue]))
|
||||
|
||||
// Defaults to false, so only include in the sequence if true
|
||||
if extensibleDNAttributes {
|
||||
packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes]))
|
||||
}
|
||||
|
||||
case packet.Tag == FilterEqualityMatch && condition == "*":
|
||||
packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute, FilterMap[FilterPresent])
|
||||
case packet.Tag == FilterEqualityMatch && strings.Contains(condition, "*"):
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
|
||||
packet.Tag = FilterSubstrings
|
||||
packet.Description = FilterMap[uint64(packet.Tag)]
|
||||
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings")
|
||||
parts := strings.Split(condition, "*")
|
||||
for i, part := range parts {
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
var tag ber.Tag
|
||||
switch i {
|
||||
case 0:
|
||||
tag = FilterSubstringsInitial
|
||||
case len(parts) - 1:
|
||||
tag = FilterSubstringsFinal
|
||||
default:
|
||||
tag = FilterSubstringsAny
|
||||
}
|
||||
encodedString, encodeErr := escapedStringToEncodedBytes(part)
|
||||
if encodeErr != nil {
|
||||
return packet, newPos, encodeErr
|
||||
}
|
||||
seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)]))
|
||||
}
|
||||
packet.AppendChild(seq)
|
||||
default:
|
||||
encodedString, encodeErr := escapedStringToEncodedBytes(condition)
|
||||
if encodeErr != nil {
|
||||
return packet, newPos, encodeErr
|
||||
}
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
|
||||
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition"))
|
||||
}
|
||||
|
||||
newPos += currentWidth
|
||||
return packet, newPos, err
|
||||
}
|
||||
}
|
||||
|
||||
// Convert from "ABC\xx\xx\xx" form to literal bytes for transport
|
||||
func escapedStringToEncodedBytes(escapedString string) (string, error) {
|
||||
var buffer bytes.Buffer
|
||||
i := 0
|
||||
for i < len(escapedString) {
|
||||
currentRune, currentWidth := utf8.DecodeRuneInString(escapedString[i:])
|
||||
if currentRune == utf8.RuneError {
|
||||
return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", i))
|
||||
}
|
||||
|
||||
// Check for escaped hex characters and convert them to their literal value for transport.
|
||||
if currentRune == '\\' {
|
||||
// http://tools.ietf.org/search/rfc4515
|
||||
// \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not
|
||||
// being a member of UTF1SUBSET.
|
||||
if i+2 > len(escapedString) {
|
||||
return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter"))
|
||||
}
|
||||
escByte, decodeErr := hexpac.DecodeString(escapedString[i+1 : i+3])
|
||||
if decodeErr != nil {
|
||||
return "", NewError(ErrorFilterCompile, errors.New("ldap: invalid characters for escape in filter"))
|
||||
}
|
||||
buffer.WriteByte(escByte[0])
|
||||
i += 2 // +1 from end of loop, so 3 total for \xx.
|
||||
} else {
|
||||
buffer.WriteRune(currentRune)
|
||||
}
|
||||
|
||||
i += currentWidth
|
||||
}
|
||||
return buffer.String(), nil
|
||||
}
|
|
@ -0,0 +1,320 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// LDAP Application Codes
//
// BER application tags of the protocol operations carried in an LDAPMessage
// (RFC 4511, section 4.1.1). Note the gaps: tags 17-18 and 20-22 are not
// used by the protocol.
const (
	ApplicationBindRequest           = 0
	ApplicationBindResponse          = 1
	ApplicationUnbindRequest         = 2
	ApplicationSearchRequest         = 3
	ApplicationSearchResultEntry     = 4
	ApplicationSearchResultDone      = 5
	ApplicationModifyRequest         = 6
	ApplicationModifyResponse        = 7
	ApplicationAddRequest            = 8
	ApplicationAddResponse           = 9
	ApplicationDelRequest            = 10
	ApplicationDelResponse           = 11
	ApplicationModifyDNRequest       = 12
	ApplicationModifyDNResponse      = 13
	ApplicationCompareRequest        = 14
	ApplicationCompareResponse       = 15
	ApplicationAbandonRequest        = 16
	ApplicationSearchResultReference = 19
	ApplicationExtendedRequest       = 23
	ApplicationExtendedResponse      = 24
)

// ApplicationMap contains human readable descriptions of LDAP Application Codes
var ApplicationMap = map[uint8]string{
	ApplicationBindRequest:           "Bind Request",
	ApplicationBindResponse:          "Bind Response",
	ApplicationUnbindRequest:         "Unbind Request",
	ApplicationSearchRequest:         "Search Request",
	ApplicationSearchResultEntry:     "Search Result Entry",
	ApplicationSearchResultDone:      "Search Result Done",
	ApplicationModifyRequest:         "Modify Request",
	ApplicationModifyResponse:        "Modify Response",
	ApplicationAddRequest:            "Add Request",
	ApplicationAddResponse:           "Add Response",
	ApplicationDelRequest:            "Del Request",
	ApplicationDelResponse:           "Del Response",
	ApplicationModifyDNRequest:       "Modify DN Request",
	ApplicationModifyDNResponse:      "Modify DN Response",
	ApplicationCompareRequest:        "Compare Request",
	ApplicationCompareResponse:       "Compare Response",
	ApplicationAbandonRequest:        "Abandon Request",
	ApplicationSearchResultReference: "Search Result Reference",
	ApplicationExtendedRequest:       "Extended Request",
	ApplicationExtendedResponse:      "Extended Response",
}

// Ldap Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10)
//
// Values of the "error" field of the ppolicy response control.
const (
	BeheraPasswordExpired             = 0
	BeheraAccountLocked               = 1
	BeheraChangeAfterReset            = 2
	BeheraPasswordModNotAllowed       = 3
	BeheraMustSupplyOldPassword       = 4
	BeheraInsufficientPasswordQuality = 5
	BeheraPasswordTooShort            = 6
	BeheraPasswordTooYoung            = 7
	BeheraPasswordInHistory           = 8
)

// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes
var BeheraPasswordPolicyErrorMap = map[int8]string{
	BeheraPasswordExpired:             "Password expired",
	BeheraAccountLocked:               "Account locked",
	BeheraChangeAfterReset:            "Password must be changed",
	BeheraPasswordModNotAllowed:       "Policy prevents password modification",
	BeheraMustSupplyOldPassword:       "Policy requires old password in order to change password",
	BeheraInsufficientPasswordQuality: "Password fails quality checks",
	BeheraPasswordTooShort:            "Password is too short for policy",
	BeheraPasswordTooYoung:            "Password has been changed too recently",
	BeheraPasswordInHistory:           "New password is in list of old passwords",
}
|
||||
|
||||
// Adds descriptions to an LDAP Response packet for debugging
|
||||
func addLDAPDescriptions(packet *ber.Packet) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = NewError(ErrorDebugging, errors.New("ldap: cannot process packet to add descriptions"))
|
||||
}
|
||||
}()
|
||||
packet.Description = "LDAP Response"
|
||||
packet.Children[0].Description = "Message ID"
|
||||
|
||||
application := uint8(packet.Children[1].Tag)
|
||||
packet.Children[1].Description = ApplicationMap[application]
|
||||
|
||||
switch application {
|
||||
case ApplicationBindRequest:
|
||||
addRequestDescriptions(packet)
|
||||
case ApplicationBindResponse:
|
||||
addDefaultLDAPResponseDescriptions(packet)
|
||||
case ApplicationUnbindRequest:
|
||||
addRequestDescriptions(packet)
|
||||
case ApplicationSearchRequest:
|
||||
addRequestDescriptions(packet)
|
||||
case ApplicationSearchResultEntry:
|
||||
packet.Children[1].Children[0].Description = "Object Name"
|
||||
packet.Children[1].Children[1].Description = "Attributes"
|
||||
for _, child := range packet.Children[1].Children[1].Children {
|
||||
child.Description = "Attribute"
|
||||
child.Children[0].Description = "Attribute Name"
|
||||
child.Children[1].Description = "Attribute Values"
|
||||
for _, grandchild := range child.Children[1].Children {
|
||||
grandchild.Description = "Attribute Value"
|
||||
}
|
||||
}
|
||||
if len(packet.Children) == 3 {
|
||||
addControlDescriptions(packet.Children[2])
|
||||
}
|
||||
case ApplicationSearchResultDone:
|
||||
addDefaultLDAPResponseDescriptions(packet)
|
||||
case ApplicationModifyRequest:
|
||||
addRequestDescriptions(packet)
|
||||
case ApplicationModifyResponse:
|
||||
case ApplicationAddRequest:
|
||||
addRequestDescriptions(packet)
|
||||
case ApplicationAddResponse:
|
||||
case ApplicationDelRequest:
|
||||
addRequestDescriptions(packet)
|
||||
case ApplicationDelResponse:
|
||||
case ApplicationModifyDNRequest:
|
||||
addRequestDescriptions(packet)
|
||||
case ApplicationModifyDNResponse:
|
||||
case ApplicationCompareRequest:
|
||||
addRequestDescriptions(packet)
|
||||
case ApplicationCompareResponse:
|
||||
case ApplicationAbandonRequest:
|
||||
addRequestDescriptions(packet)
|
||||
case ApplicationSearchResultReference:
|
||||
case ApplicationExtendedRequest:
|
||||
addRequestDescriptions(packet)
|
||||
case ApplicationExtendedResponse:
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// addControlDescriptions labels each control in a controls sequence for debug
// printing, and re-decodes the values of the paging and Behera password
// policy controls so their inner fields can be labeled too.
//
// NOTE: the unchecked type assertions (e.g. .Value.(string)) panic on
// malformed input; the sole caller, addLDAPDescriptions, recovers from that
// panic and converts it to an error.
func addControlDescriptions(packet *ber.Packet) {
	packet.Description = "Controls"
	for _, child := range packet.Children {
		var value *ber.Packet
		controlType := ""
		child.Description = "Control"
		// A control is SEQUENCE { controlType, criticality OPTIONAL,
		// controlValue OPTIONAL }; branch on how many fields are present.
		switch len(child.Children) {
		case 0:
			// at least one child is required for control type
			continue

		case 1:
			// just type, no criticality or value
			controlType = child.Children[0].Value.(string)
			child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"

		case 2:
			controlType = child.Children[0].Value.(string)
			child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
			// Children[1] could be criticality or value (both are optional)
			// duck-type on whether this is a boolean
			if _, ok := child.Children[1].Value.(bool); ok {
				child.Children[1].Description = "Criticality"
			} else {
				child.Children[1].Description = "Control Value"
				value = child.Children[1]
			}

		case 3:
			// criticality and value present
			controlType = child.Children[0].Value.(string)
			child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
			child.Children[1].Description = "Criticality"
			child.Children[2].Description = "Control Value"
			value = child.Children[2]

		default:
			// more than 3 children is invalid
			continue
		}
		// Only controls that carried a value need further decoding.
		if value == nil {
			continue
		}
		switch controlType {
		case ControlTypePaging:
			value.Description += " (Paging)"
			// The control value is an OCTET STRING wrapping a BER sequence;
			// decode it in place so the inner fields become children.
			if value.Value != nil {
				valueChildren := ber.DecodePacket(value.Data.Bytes())
				value.Data.Truncate(0)
				value.Value = nil
				valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes()
				value.AppendChild(valueChildren)
			}
			value.Children[0].Description = "Real Search Control Value"
			value.Children[0].Children[0].Description = "Paging Size"
			value.Children[0].Children[1].Description = "Cookie"

		case ControlTypeBeheraPasswordPolicy:
			value.Description += " (Password Policy - Behera Draft)"
			if value.Value != nil {
				valueChildren := ber.DecodePacket(value.Data.Bytes())
				value.Data.Truncate(0)
				value.Value = nil
				value.AppendChild(valueChildren)
			}
			sequence := value.Children[0]
			// Tag 0 is the warning CHOICE, tag 1 the error ENUMERATED, per
			// the Behera draft's PasswordPolicyResponseValue.
			for _, child := range sequence.Children {
				if child.Tag == 0 {
					//Warning
					warningPacket := child.Children[0]
					packet := ber.DecodePacket(warningPacket.Data.Bytes())
					val, ok := packet.Value.(int64)
					if ok {
						if warningPacket.Tag == 0 {
							//timeBeforeExpiration
							value.Description += " (TimeBeforeExpiration)"
							warningPacket.Value = val
						} else if warningPacket.Tag == 1 {
							//graceAuthNsRemaining
							value.Description += " (GraceAuthNsRemaining)"
							warningPacket.Value = val
						}
					}
				} else if child.Tag == 1 {
					// Error
					packet := ber.DecodePacket(child.Data.Bytes())
					val, ok := packet.Value.(int8)
					if !ok {
						// -1 is not a defined Behera code; it marks an
						// undecodable error value.
						val = -1
					}
					child.Description = "Error"
					child.Value = val
				}
			}
		}
	}
}
|
||||
|
||||
func addRequestDescriptions(packet *ber.Packet) {
|
||||
packet.Description = "LDAP Request"
|
||||
packet.Children[0].Description = "Message ID"
|
||||
packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)]
|
||||
if len(packet.Children) == 3 {
|
||||
addControlDescriptions(packet.Children[2])
|
||||
}
|
||||
}
|
||||
|
||||
// addDefaultLDAPResponseDescriptions labels the children of a response with
// the standard LDAPResult layout (result code, matched DN, diagnostic
// message, optional referral) plus any trailing controls sequence.
func addDefaultLDAPResponseDescriptions(packet *ber.Packet) {
	// The error from getLDAPResultCode is deliberately ignored: this is
	// best-effort labeling for debug output only.
	resultCode, _ := getLDAPResultCode(packet)
	packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[resultCode] + ")"
	packet.Children[1].Children[1].Description = "Matched DN"
	packet.Children[1].Children[2].Description = "Error Message"
	// A fourth LDAPResult child is the referral (present only on referrals).
	if len(packet.Children[1].Children) > 3 {
		packet.Children[1].Children[3].Description = "Referral"
	}
	// A third top-level child, when present, is the controls sequence.
	if len(packet.Children) == 3 {
		addControlDescriptions(packet.Children[2])
	}
}
|
||||
|
||||
// DebugBinaryFile reads and prints packets from the given filename
|
||||
func DebugBinaryFile(fileName string) error {
|
||||
file, err := ioutil.ReadFile(fileName)
|
||||
if err != nil {
|
||||
return NewError(ErrorDebugging, err)
|
||||
}
|
||||
ber.PrintBytes(os.Stdout, file, "")
|
||||
packet := ber.DecodePacket(file)
|
||||
addLDAPDescriptions(packet)
|
||||
ber.PrintPacket(packet)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// hex holds the lowercase hexadecimal digits used when writing \xx escapes.
var hex = "0123456789abcdef"

// mustEscape reports whether c must be written as a \xx escape in an LDAP
// filter: NUL, the special characters ( ) * \, and any byte >= 0x80.
func mustEscape(c byte) bool {
	switch {
	case c > 0x7f, c == 0:
		return true
	case c == '(', c == ')', c == '*', c == '\\':
		return true
	default:
		return false
	}
}

// EscapeFilter escapes from the provided LDAP filter string the special
// characters in the set `()*\` and those out of the range 0 < c < 0x80,
// as defined in RFC4515.
func EscapeFilter(filter string) string {
	// First pass: count how many bytes need escaping so the unescaped
	// common case returns the input string unchanged, with no allocation.
	needed := 0
	for i := 0; i < len(filter); i++ {
		if mustEscape(filter[i]) {
			needed++
		}
	}
	if needed == 0 {
		return filter
	}
	// Second pass: each escaped byte becomes the three bytes \xx.
	out := make([]byte, 0, len(filter)+needed*2)
	for i := 0; i < len(filter); i++ {
		c := filter[i]
		if mustEscape(c) {
			out = append(out, '\\', hex[c>>4], hex[c&0xf])
		} else {
			out = append(out, c)
		}
	}
	return string(out)
}
|
|
@ -0,0 +1,170 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
//
|
||||
// File contains Modify functionality
|
||||
//
|
||||
// https://tools.ietf.org/html/rfc4511
|
||||
//
|
||||
// ModifyRequest ::= [APPLICATION 6] SEQUENCE {
|
||||
// object LDAPDN,
|
||||
// changes SEQUENCE OF change SEQUENCE {
|
||||
// operation ENUMERATED {
|
||||
// add (0),
|
||||
// delete (1),
|
||||
// replace (2),
|
||||
// ... },
|
||||
// modification PartialAttribute } }
|
||||
//
|
||||
// PartialAttribute ::= SEQUENCE {
|
||||
// type AttributeDescription,
|
||||
// vals SET OF value AttributeValue }
|
||||
//
|
||||
// AttributeDescription ::= LDAPString
|
||||
// -- Constrained to <attributedescription>
|
||||
// -- [RFC4512]
|
||||
//
|
||||
// AttributeValue ::= OCTET STRING
|
||||
//
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// Change operation choices
//
// Values of the "operation" ENUMERATED inside a ModifyRequest change
// (RFC 4511, section 4.6).
const (
	AddAttribute     = 0
	DeleteAttribute  = 1
	ReplaceAttribute = 2
)
|
||||
|
||||
// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
type PartialAttribute struct {
	// Type is the type of the partial attribute
	Type string
	// Vals are the values of the partial attribute
	Vals []string
}

// encode serializes the PartialAttribute as the BER
// SEQUENCE { type OCTET STRING, vals SET OF OCTET STRING } used inside a
// ModifyRequest change (RFC 4511, section 4.6).
func (p *PartialAttribute) encode() *ber.Packet {
	seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute")
	seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type"))
	set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
	for _, value := range p.Vals {
		set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
	}
	seq.AppendChild(set)
	return seq
}
|
||||
|
||||
// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
//
// Populate it via Add, Delete and Replace; the three attribute lists are
// serialized in that order when the request is encoded.
type ModifyRequest struct {
	// DN is the distinguishedName of the directory entry to modify
	DN string
	// AddAttributes contain the attributes to add
	AddAttributes []PartialAttribute
	// DeleteAttributes contain the attributes to delete
	DeleteAttributes []PartialAttribute
	// ReplaceAttributes contain the attributes to replace
	ReplaceAttributes []PartialAttribute
}

// Add inserts the given attribute to the list of attributes to add
func (m *ModifyRequest) Add(attrType string, attrVals []string) {
	m.AddAttributes = append(m.AddAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
}

// Delete inserts the given attribute to the list of attributes to delete
func (m *ModifyRequest) Delete(attrType string, attrVals []string) {
	m.DeleteAttributes = append(m.DeleteAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
}

// Replace inserts the given attribute to the list of attributes to replace
func (m *ModifyRequest) Replace(attrType string, attrVals []string) {
	m.ReplaceAttributes = append(m.ReplaceAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
}
|
||||
|
||||
func (m ModifyRequest) encode() *ber.Packet {
|
||||
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request")
|
||||
request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN"))
|
||||
changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes")
|
||||
for _, attribute := range m.AddAttributes {
|
||||
change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
|
||||
change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(AddAttribute), "Operation"))
|
||||
change.AppendChild(attribute.encode())
|
||||
changes.AppendChild(change)
|
||||
}
|
||||
for _, attribute := range m.DeleteAttributes {
|
||||
change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
|
||||
change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(DeleteAttribute), "Operation"))
|
||||
change.AppendChild(attribute.encode())
|
||||
changes.AppendChild(change)
|
||||
}
|
||||
for _, attribute := range m.ReplaceAttributes {
|
||||
change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
|
||||
change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(ReplaceAttribute), "Operation"))
|
||||
change.AppendChild(attribute.encode())
|
||||
changes.AppendChild(change)
|
||||
}
|
||||
request.AppendChild(changes)
|
||||
return request
|
||||
}
|
||||
|
||||
// NewModifyRequest creates a modify request for the given DN
|
||||
func NewModifyRequest(
|
||||
dn string,
|
||||
) *ModifyRequest {
|
||||
return &ModifyRequest{
|
||||
DN: dn,
|
||||
}
|
||||
}
|
||||
|
||||
// Modify performs the ModifyRequest
|
||||
func (l *Conn) Modify(modifyRequest *ModifyRequest) error {
|
||||
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
|
||||
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
|
||||
packet.AppendChild(modifyRequest.encode())
|
||||
|
||||
l.Debug.PrintPacket(packet)
|
||||
|
||||
msgCtx, err := l.sendMessage(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer l.finishMessage(msgCtx)
|
||||
|
||||
l.Debug.Printf("%d: waiting for response", msgCtx.id)
|
||||
packetResponse, ok := <-msgCtx.responses
|
||||
if !ok {
|
||||
return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
|
||||
}
|
||||
packet, err = packetResponse.ReadPacket()
|
||||
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if l.Debug {
|
||||
if err := addLDAPDescriptions(packet); err != nil {
|
||||
return err
|
||||
}
|
||||
ber.PrintPacket(packet)
|
||||
}
|
||||
|
||||
if packet.Children[1].Tag == ApplicationModifyResponse {
|
||||
resultCode, resultDescription := getLDAPResultCode(packet)
|
||||
if resultCode != 0 {
|
||||
return NewError(resultCode, errors.New(resultDescription))
|
||||
}
|
||||
} else {
|
||||
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
|
||||
}
|
||||
|
||||
l.Debug.Printf("%d: returning", msgCtx.id)
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,148 @@
|
|||
// This file contains the password modify extended operation as specified in rfc 3062
|
||||
//
|
||||
// https://tools.ietf.org/html/rfc3062
|
||||
//
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
const (
	// passwordModifyOID is the object identifier of the Password Modify
	// extended operation (RFC 3062).
	passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1"
)

// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt
type PasswordModifyRequest struct {
	// UserIdentity is an optional string representation of the user associated with the request.
	// This string may or may not be an LDAPDN [RFC2253].
	// If no UserIdentity field is present, the request acts upon the password of the user currently associated with the LDAP session
	UserIdentity string
	// OldPassword, if present, contains the user's current password
	OldPassword string
	// NewPassword, if present, contains the desired password for this user
	NewPassword string
}

// PasswordModifyResult holds the server response to a PasswordModifyRequest
type PasswordModifyResult struct {
	// GeneratedPassword holds a password generated by the server, if present
	GeneratedPassword string
}
|
||||
|
||||
// encode builds the BER packet for the Password Modify extended request: an
// ExtendedRequest whose requestName (context tag 0) is passwordModifyOID and
// whose requestValue (context tag 1) wraps the PasswdModifyRequestValue
// SEQUENCE from RFC 3062, with context tags 0/1/2 for userIdentity,
// oldPasswd and newPasswd. Empty fields are omitted (all three are OPTIONAL).
func (r *PasswordModifyRequest) encode() (*ber.Packet, error) {
	request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation")
	request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID"))
	extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request")
	passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request")
	if r.UserIdentity != "" {
		passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, r.UserIdentity, "User Identity"))
	}
	if r.OldPassword != "" {
		passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, r.OldPassword, "Old Password"))
	}
	if r.NewPassword != "" {
		passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, r.NewPassword, "New Password"))
	}

	extendedRequestValue.AppendChild(passwordModifyRequestValue)
	request.AppendChild(extendedRequestValue)

	// The error return is always nil here; the signature keeps room for
	// reporting encoding failures without breaking callers.
	return request, nil
}
|
||||
|
||||
// NewPasswordModifyRequest creates a new PasswordModifyRequest
//
// According to RFC 3062 (the original comment cited "RFC 3602", which is
// the AES-CBC cipher RFC — the Password Modify operation is RFC 3062):
// userIdentity is a string representing the user associated with the request.
// This string may or may not be an LDAPDN (RFC 2253).
// If userIdentity is empty then the operation will act on the user associated
// with the session.
//
// oldPassword is the current user's password, it can be empty or it can be
// needed depending on the session user access rights (usually an administrator
// can change a user's password without knowing the current one) and the
// password policy (see pwdSafeModify password policy's attribute)
//
// newPassword is the desired user's password. If empty the server can return
// an error or generate a new password that will be available in the
// PasswordModifyResult.GeneratedPassword
//
func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest {
	return &PasswordModifyRequest{
		UserIdentity: userIdentity,
		OldPassword:  oldPassword,
		NewPassword:  newPassword,
	}
}
|
||||
|
||||
// PasswordModify performs the modification request.
// It sends the RFC 3062 extended request, waits for the matching response,
// and on success returns a PasswordModifyResult whose GeneratedPassword is
// set when the server generated a password for the user.
func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) {
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))

	encodedPasswordModifyRequest, err := passwordModifyRequest.encode()
	if err != nil {
		return nil, err
	}
	packet.AppendChild(encodedPasswordModifyRequest)

	l.Debug.PrintPacket(packet)

	msgCtx, err := l.sendMessage(packet)
	if err != nil {
		return nil, err
	}
	defer l.finishMessage(msgCtx)

	result := &PasswordModifyResult{}

	l.Debug.Printf("%d: waiting for response", msgCtx.id)
	packetResponse, ok := <-msgCtx.responses
	if !ok {
		return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
	}
	packet, err = packetResponse.ReadPacket()
	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
	if err != nil {
		return nil, err
	}

	if packet == nil {
		return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve message"))
	}

	if l.Debug {
		if err := addLDAPDescriptions(packet); err != nil {
			return nil, err
		}
		ber.PrintPacket(packet)
	}

	// Anything other than an ExtendedResponse is a protocol violation.
	if packet.Children[1].Tag == ApplicationExtendedResponse {
		resultCode, resultDescription := getLDAPResultCode(packet)
		if resultCode != 0 {
			return nil, NewError(resultCode, errors.New(resultDescription))
		}
	} else {
		return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag))
	}

	extendedResponse := packet.Children[1]
	for _, child := range extendedResponse.Children {
		// Context tag 11 is the responseValue field of ExtendedResponse
		// (RFC 4511, section 4.12); for Password Modify it wraps the
		// PasswdModifyResponseValue SEQUENCE of RFC 3062.
		if child.Tag == 11 {
			passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes())
			if len(passwordModifyResponseValue.Children) == 1 {
				// Context tag 0 inside the response value is the genPasswd
				// field (RFC 3062).
				if passwordModifyResponseValue.Children[0].Tag == 0 {
					result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes())
				}
			}
		}
	}

	return result, nil
}
|
|
@ -0,0 +1,450 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
//
|
||||
// File contains Search functionality
|
||||
//
|
||||
// https://tools.ietf.org/html/rfc4511
|
||||
//
|
||||
// SearchRequest ::= [APPLICATION 3] SEQUENCE {
|
||||
// baseObject LDAPDN,
|
||||
// scope ENUMERATED {
|
||||
// baseObject (0),
|
||||
// singleLevel (1),
|
||||
// wholeSubtree (2),
|
||||
// ... },
|
||||
// derefAliases ENUMERATED {
|
||||
// neverDerefAliases (0),
|
||||
// derefInSearching (1),
|
||||
// derefFindingBaseObj (2),
|
||||
// derefAlways (3) },
|
||||
// sizeLimit INTEGER (0 .. maxInt),
|
||||
// timeLimit INTEGER (0 .. maxInt),
|
||||
// typesOnly BOOLEAN,
|
||||
// filter Filter,
|
||||
// attributes AttributeSelection }
|
||||
//
|
||||
// AttributeSelection ::= SEQUENCE OF selector LDAPString
|
||||
// -- The LDAPString is constrained to
|
||||
// -- <attributeSelector> in Section 4.5.1.8
|
||||
//
|
||||
// Filter ::= CHOICE {
|
||||
// and [0] SET SIZE (1..MAX) OF filter Filter,
|
||||
// or [1] SET SIZE (1..MAX) OF filter Filter,
|
||||
// not [2] Filter,
|
||||
// equalityMatch [3] AttributeValueAssertion,
|
||||
// substrings [4] SubstringFilter,
|
||||
// greaterOrEqual [5] AttributeValueAssertion,
|
||||
// lessOrEqual [6] AttributeValueAssertion,
|
||||
// present [7] AttributeDescription,
|
||||
// approxMatch [8] AttributeValueAssertion,
|
||||
// extensibleMatch [9] MatchingRuleAssertion,
|
||||
// ... }
|
||||
//
|
||||
// SubstringFilter ::= SEQUENCE {
|
||||
// type AttributeDescription,
|
||||
// substrings SEQUENCE SIZE (1..MAX) OF substring CHOICE {
|
||||
// initial [0] AssertionValue, -- can occur at most once
|
||||
// any [1] AssertionValue,
|
||||
// final [2] AssertionValue } -- can occur at most once
|
||||
// }
|
||||
//
|
||||
// MatchingRuleAssertion ::= SEQUENCE {
|
||||
// matchingRule [1] MatchingRuleId OPTIONAL,
|
||||
// type [2] AttributeDescription OPTIONAL,
|
||||
// matchValue [3] AssertionValue,
|
||||
// dnAttributes [4] BOOLEAN DEFAULT FALSE }
|
||||
//
|
||||
//
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/asn1-ber.v1"
|
||||
)
|
||||
|
||||
// scope choices
//
// Values of the "scope" ENUMERATED of a SearchRequest (RFC 4511,
// section 4.5.1.2).
const (
	ScopeBaseObject   = 0
	ScopeSingleLevel  = 1
	ScopeWholeSubtree = 2
)

// ScopeMap contains human readable descriptions of scope choices
var ScopeMap = map[int]string{
	ScopeBaseObject:   "Base Object",
	ScopeSingleLevel:  "Single Level",
	ScopeWholeSubtree: "Whole Subtree",
}

// derefAliases
//
// Values of the "derefAliases" ENUMERATED of a SearchRequest (RFC 4511,
// section 4.5.1.3).
const (
	NeverDerefAliases   = 0
	DerefInSearching    = 1
	DerefFindingBaseObj = 2
	DerefAlways         = 3
)

// DerefMap contains human readable descriptions of derefAliases choices
var DerefMap = map[int]string{
	NeverDerefAliases:   "NeverDerefAliases",
	DerefInSearching:    "DerefInSearching",
	DerefFindingBaseObj: "DerefFindingBaseObj",
	DerefAlways:         "DerefAlways",
}
|
||||
|
||||
// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs.
|
||||
// The map of attributes is accessed in alphabetical order of the keys in order to ensure that, for the
|
||||
// same input map of attributes, the output entry will contain the same order of attributes
|
||||
func NewEntry(dn string, attributes map[string][]string) *Entry {
|
||||
var attributeNames []string
|
||||
for attributeName := range attributes {
|
||||
attributeNames = append(attributeNames, attributeName)
|
||||
}
|
||||
sort.Strings(attributeNames)
|
||||
|
||||
var encodedAttributes []*EntryAttribute
|
||||
for _, attributeName := range attributeNames {
|
||||
encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName]))
|
||||
}
|
||||
return &Entry{
|
||||
DN: dn,
|
||||
Attributes: encodedAttributes,
|
||||
}
|
||||
}
|
||||
|
||||
// Entry represents a single search result entry as returned by a server.
type Entry struct {
	// DN is the distinguished name of the entry
	DN string
	// Attributes are the returned attributes for the entry; string and raw
	// byte forms of each value are kept side by side (see EntryAttribute)
	Attributes []*EntryAttribute
}
|
||||
|
||||
// GetAttributeValues returns the values for the named attribute, or an empty list
|
||||
func (e *Entry) GetAttributeValues(attribute string) []string {
|
||||
for _, attr := range e.Attributes {
|
||||
if attr.Name == attribute {
|
||||
return attr.Values
|
||||
}
|
||||
}
|
||||
return []string{}
|
||||
}
|
||||
|
||||
// GetRawAttributeValues returns the byte values for the named attribute, or an empty list
|
||||
func (e *Entry) GetRawAttributeValues(attribute string) [][]byte {
|
||||
for _, attr := range e.Attributes {
|
||||
if attr.Name == attribute {
|
||||
return attr.ByteValues
|
||||
}
|
||||
}
|
||||
return [][]byte{}
|
||||
}
|
||||
|
||||
// GetAttributeValue returns the first value for the named attribute, or ""
|
||||
func (e *Entry) GetAttributeValue(attribute string) string {
|
||||
values := e.GetAttributeValues(attribute)
|
||||
if len(values) == 0 {
|
||||
return ""
|
||||
}
|
||||
return values[0]
|
||||
}
|
||||
|
||||
// GetRawAttributeValue returns the first value for the named attribute, or an empty slice
|
||||
func (e *Entry) GetRawAttributeValue(attribute string) []byte {
|
||||
values := e.GetRawAttributeValues(attribute)
|
||||
if len(values) == 0 {
|
||||
return []byte{}
|
||||
}
|
||||
return values[0]
|
||||
}
|
||||
|
||||
// Print outputs a human-readable description
|
||||
func (e *Entry) Print() {
|
||||
fmt.Printf("DN: %s\n", e.DN)
|
||||
for _, attr := range e.Attributes {
|
||||
attr.Print()
|
||||
}
|
||||
}
|
||||
|
||||
// PrettyPrint outputs a human-readable description indenting
|
||||
func (e *Entry) PrettyPrint(indent int) {
|
||||
fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN)
|
||||
for _, attr := range e.Attributes {
|
||||
attr.PrettyPrint(indent + 2)
|
||||
}
|
||||
}
|
||||
|
||||
// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair
|
||||
func NewEntryAttribute(name string, values []string) *EntryAttribute {
|
||||
var bytes [][]byte
|
||||
for _, value := range values {
|
||||
bytes = append(bytes, []byte(value))
|
||||
}
|
||||
return &EntryAttribute{
|
||||
Name: name,
|
||||
Values: values,
|
||||
ByteValues: bytes,
|
||||
}
|
||||
}
|
||||
|
||||
// EntryAttribute holds a single attribute of an Entry.
type EntryAttribute struct {
	// Name is the name of the attribute
	Name string
	// Values contain the string values of the attribute
	Values []string
	// ByteValues contain the raw values of the attribute, parallel to Values
	// (same index, same value)
	ByteValues [][]byte
}
|
||||
|
||||
// Print outputs a human-readable description of the attribute to stdout.
// Note: %s on the []string renders all values in one bracketed list.
func (e *EntryAttribute) Print() {
	fmt.Printf("%s: %s\n", e.Name, e.Values)
}
|
||||
|
||||
// PrettyPrint outputs a human-readable description of the attribute to
// stdout, indented by indent spaces; all values render in one bracketed list.
func (e *EntryAttribute) PrettyPrint(indent int) {
	fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values)
}
|
||||
|
||||
// SearchResult holds the server's response to a search request.
type SearchResult struct {
	// Entries are the returned entries
	Entries []*Entry
	// Referrals are the returned referral URIs
	Referrals []string
	// Controls are the returned controls
	Controls []Control
}
|
||||
|
||||
// Print outputs a human-readable description
|
||||
func (s *SearchResult) Print() {
|
||||
for _, entry := range s.Entries {
|
||||
entry.Print()
|
||||
}
|
||||
}
|
||||
|
||||
// PrettyPrint outputs a human-readable description with indenting
|
||||
func (s *SearchResult) PrettyPrint(indent int) {
|
||||
for _, entry := range s.Entries {
|
||||
entry.PrettyPrint(indent)
|
||||
}
|
||||
}
|
||||
|
||||
// SearchRequest represents a search request to send to the server.
// Fields are encoded onto the wire in declaration order by encode.
type SearchRequest struct {
	// BaseDN is the distinguished name the search starts from
	BaseDN string
	// Scope is one of the Scope* constants
	Scope int
	// DerefAliases is one of the *Deref* constants
	DerefAliases int
	// SizeLimit caps the number of entries returned (encoded as-is; 0
	// presumably means no client-requested limit — server policy applies)
	SizeLimit int
	// TimeLimit caps the server-side search time (encoded as-is)
	TimeLimit int
	// TypesOnly requests attribute descriptions without values when true
	TypesOnly bool
	// Filter is the string search filter, compiled by CompileFilter in encode
	Filter string
	// Attributes lists the attribute names to return
	Attributes []string
	// Controls are optional request controls appended after the request body
	Controls []Control
}
|
||||
|
||||
// encode serializes the search request into a BER packet suitable for
// inclusion in an LDAP message. The element order below is the wire format
// and must not be changed. Returns an error only when the filter string
// fails to compile.
func (s *SearchRequest) encode() (*ber.Packet, error) {
	request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request")
	request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, s.BaseDN, "Base DN"))
	request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.Scope), "Scope"))
	request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.DerefAliases), "Deref Aliases"))
	request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.SizeLimit), "Size Limit"))
	request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.TimeLimit), "Time Limit"))
	request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, s.TypesOnly, "Types Only"))
	// compile and encode filter; the filter string is parsed into its own
	// BER subtree by CompileFilter
	filterPacket, err := CompileFilter(s.Filter)
	if err != nil {
		return nil, err
	}
	request.AppendChild(filterPacket)
	// encode requested attribute names as a SEQUENCE of octet strings
	attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
	for _, attribute := range s.Attributes {
		attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
	}
	request.AppendChild(attributesPacket)
	return request, nil
}
|
||||
|
||||
// NewSearchRequest creates a new search request
|
||||
func NewSearchRequest(
|
||||
BaseDN string,
|
||||
Scope, DerefAliases, SizeLimit, TimeLimit int,
|
||||
TypesOnly bool,
|
||||
Filter string,
|
||||
Attributes []string,
|
||||
Controls []Control,
|
||||
) *SearchRequest {
|
||||
return &SearchRequest{
|
||||
BaseDN: BaseDN,
|
||||
Scope: Scope,
|
||||
DerefAliases: DerefAliases,
|
||||
SizeLimit: SizeLimit,
|
||||
TimeLimit: TimeLimit,
|
||||
TypesOnly: TypesOnly,
|
||||
Filter: Filter,
|
||||
Attributes: Attributes,
|
||||
Controls: Controls,
|
||||
}
|
||||
}
|
||||
|
||||
// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the
|
||||
// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically.
|
||||
// The following four cases are possible given the arguments:
|
||||
// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size
|
||||
// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries
|
||||
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request
|
||||
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries
|
||||
// A requested pagingSize of 0 is interpreted as no limit by LDAP servers.
|
||||
func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) {
|
||||
var pagingControl *ControlPaging
|
||||
|
||||
control := FindControl(searchRequest.Controls, ControlTypePaging)
|
||||
if control == nil {
|
||||
pagingControl = NewControlPaging(pagingSize)
|
||||
searchRequest.Controls = append(searchRequest.Controls, pagingControl)
|
||||
} else {
|
||||
castControl, ok := control.(*ControlPaging)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Expected paging control to be of type *ControlPaging, got %v", control)
|
||||
}
|
||||
if castControl.PagingSize != pagingSize {
|
||||
return nil, fmt.Errorf("Paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize)
|
||||
}
|
||||
pagingControl = castControl
|
||||
}
|
||||
|
||||
searchResult := new(SearchResult)
|
||||
for {
|
||||
result, err := l.Search(searchRequest)
|
||||
l.Debug.Printf("Looking for Paging Control...")
|
||||
if err != nil {
|
||||
return searchResult, err
|
||||
}
|
||||
if result == nil {
|
||||
return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received"))
|
||||
}
|
||||
|
||||
for _, entry := range result.Entries {
|
||||
searchResult.Entries = append(searchResult.Entries, entry)
|
||||
}
|
||||
for _, referral := range result.Referrals {
|
||||
searchResult.Referrals = append(searchResult.Referrals, referral)
|
||||
}
|
||||
for _, control := range result.Controls {
|
||||
searchResult.Controls = append(searchResult.Controls, control)
|
||||
}
|
||||
|
||||
l.Debug.Printf("Looking for Paging Control...")
|
||||
pagingResult := FindControl(result.Controls, ControlTypePaging)
|
||||
if pagingResult == nil {
|
||||
pagingControl = nil
|
||||
l.Debug.Printf("Could not find paging control. Breaking...")
|
||||
break
|
||||
}
|
||||
|
||||
cookie := pagingResult.(*ControlPaging).Cookie
|
||||
if len(cookie) == 0 {
|
||||
pagingControl = nil
|
||||
l.Debug.Printf("Could not find cookie. Breaking...")
|
||||
break
|
||||
}
|
||||
pagingControl.SetCookie(cookie)
|
||||
}
|
||||
|
||||
if pagingControl != nil {
|
||||
l.Debug.Printf("Abandoning Paging...")
|
||||
pagingControl.PagingSize = 0
|
||||
l.Search(searchRequest)
|
||||
}
|
||||
|
||||
return searchResult, nil
|
||||
}
|
||||
|
||||
// Search performs the given search request synchronously: it encodes and
// sends one LDAP SearchRequest message, then consumes response packets from
// the connection's reader goroutine until a SearchResultDone is seen.
// Returns the accumulated entries/referrals/controls, or an error from
// encoding, sending, reading, or a non-zero LDAP result code.
func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) {
	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
	// encode search request
	encodedSearchRequest, err := searchRequest.encode()
	if err != nil {
		return nil, err
	}
	packet.AppendChild(encodedSearchRequest)
	// encode search controls (only when the caller supplied any)
	if searchRequest.Controls != nil {
		packet.AppendChild(encodeControls(searchRequest.Controls))
	}

	l.Debug.PrintPacket(packet)

	// Register the message with the connection and send it; finishMessage
	// deregisters it (and its response channel) on every return path.
	msgCtx, err := l.sendMessage(packet)
	if err != nil {
		return nil, err
	}
	defer l.finishMessage(msgCtx)

	result := &SearchResult{
		Entries:   make([]*Entry, 0),
		Referrals: make([]string, 0),
		Controls:  make([]Control, 0)}

	// A single search produces a stream of messages; keep reading until the
	// SearchResultDone message arrives.
	foundSearchResultDone := false
	for !foundSearchResultDone {
		l.Debug.Printf("%d: waiting for response", msgCtx.id)
		packetResponse, ok := <-msgCtx.responses
		if !ok {
			// channel closed by the reader: connection is gone
			return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
		}
		packet, err = packetResponse.ReadPacket()
		l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
		if err != nil {
			return nil, err
		}

		if l.Debug {
			if err := addLDAPDescriptions(packet); err != nil {
				return nil, err
			}
			ber.PrintPacket(packet)
		}

		// Children[0] is the message ID; Children[1] is the protocol op whose
		// application tag selects the message type (per RFC 4511).
		// NOTE(review): the .(string) type assertions below panic on a
		// malformed server response instead of returning an error — confirm
		// this is acceptable for the deployment.
		switch packet.Children[1].Tag {
		case 4:
			// SearchResultEntry: decode DN plus attribute name/value pairs
			entry := new(Entry)
			entry.DN = packet.Children[1].Children[0].Value.(string)
			for _, child := range packet.Children[1].Children[1].Children {
				attr := new(EntryAttribute)
				attr.Name = child.Children[0].Value.(string)
				for _, value := range child.Children[1].Children {
					attr.Values = append(attr.Values, value.Value.(string))
					attr.ByteValues = append(attr.ByteValues, value.ByteValue)
				}
				entry.Attributes = append(entry.Attributes, attr)
			}
			result.Entries = append(result.Entries, entry)
		case 5:
			// SearchResultDone: check the result code, collect any response
			// controls, and stop reading
			resultCode, resultDescription := getLDAPResultCode(packet)
			if resultCode != 0 {
				// partial result is returned alongside the error
				return result, NewError(resultCode, errors.New(resultDescription))
			}
			if len(packet.Children) == 3 {
				for _, child := range packet.Children[2].Children {
					result.Controls = append(result.Controls, DecodeControl(child))
				}
			}
			foundSearchResultDone = true
		case 19:
			// SearchResultReference: record the referral URI
			result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string))
		}
	}
	l.Debug.Printf("%d: returning", msgCtx.id)
	return result, nil
}
|
|
@ -0,0 +1,7 @@
|
|||
# github.com/pyke369/golang-support v0.0.0-20190703174728-34ca97aa79e9
|
||||
github.com/pyke369/golang-support/uconfig
|
||||
github.com/pyke369/golang-support/rcache
|
||||
# gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d
|
||||
gopkg.in/asn1-ber.v1
|
||||
# gopkg.in/ldap.v2 v2.5.1
|
||||
gopkg.in/ldap.v2
|
Loading…
Reference in New Issue