From b5fcedc4d59d02f505ba07a69b73af492dafcea4 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Tue, 24 Jan 2023 10:06:04 -0500 Subject: [PATCH 01/44] QUIC/UDP updates - In quic.Dial, support non-*net.UDPConn packet conns. This allows for wrapped and artificial net.PacketConn transports. - Clarify state of QUIC ECN bit processing. Guard against accidental future exposure of potential ECN fingerprint in obfuscated QUIC connections (client-side/upstream only). - Update UDPConn to use same Control method as TCPConn; add an explicit check that the dialed conn is a *net.UDPConn, as required to support QUIC ECN operations; and document behavior of non-bound UDP sockets. - Add UDP write deadline fix to QUICTransport (FRONTED-QUIC) case, missed in https://github.com/Psiphon-Labs/psiphon-tunnel-core/commit/b8c5c1948368f64a4191a3b525abfcc71d629429. --- psiphon/UDPConn.go | 76 ++++++++++++++---- psiphon/UDPConn_bind.go | 65 ---------------- psiphon/UDPConn_nobind.go | 44 ----------- psiphon/common/quic/obfuscator.go | 125 ++++++++++++++++++++---------- psiphon/common/quic/quic.go | 119 ++++++++++++++++------------ 5 files changed, 216 insertions(+), 213 deletions(-) delete mode 100644 psiphon/UDPConn_bind.go delete mode 100644 psiphon/UDPConn_nobind.go diff --git a/psiphon/UDPConn.go b/psiphon/UDPConn.go index 4b6ceffc6..9841e1043 100644 --- a/psiphon/UDPConn.go +++ b/psiphon/UDPConn.go @@ -29,15 +29,16 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" ) -// NewUDPConn resolves addr and configures a new UDP conn. The UDP socket is -// created using options in DialConfig, including DeviceBinder. The returned -// UDPAddr uses DialConfig options IPv6Synthesizer and ResolvedIPCallback. +// NewUDPConn resolves addr and configures a new *net.UDPConn. The UDP socket +// is created using options in DialConfig, including DeviceBinder. The +// returned UDPAddr uses DialConfig options IPv6Synthesizer and +// ResolvedIPCallback. // // The UDP conn is not dialed; it is intended for use with WriteTo using the // returned UDPAddr, not Write. // -// The returned conn is not a Closer; the caller is expected to wrap this conn -// with another higher-level conn that provides that interface. +// The returned conn is not a common.Closer; the caller is expected to wrap +// this conn with another higher-level conn that provides that interface. 
func NewUDPConn( ctx context.Context, addr string, config *DialConfig) (net.PacketConn, *net.UDPAddr, error) { @@ -81,23 +82,70 @@ func NewUDPConn( } } - var domain int - if ipAddr != nil && ipAddr.To4() != nil { - domain = syscall.AF_INET - } else if ipAddr != nil && ipAddr.To16() != nil { - domain = syscall.AF_INET6 - } else { - return nil, nil, errors.Tracef("invalid IP address: %s", ipAddr.String()) + listen := &net.ListenConfig{ + Control: func(_, _ string, c syscall.RawConn) error { + var controlErr error + err := c.Control(func(fd uintptr) { + + socketFD := int(fd) + + setAdditionalSocketOptions(socketFD) + + if config.BPFProgramInstructions != nil { + err := setSocketBPF(config.BPFProgramInstructions, socketFD) + if err != nil { + controlErr = errors.Tracef("setSocketBPF failed: %s", err) + return + } + } + + if config.DeviceBinder != nil { + _, err := config.DeviceBinder.BindToDevice(socketFD) + if err != nil { + controlErr = errors.Tracef("BindToDevice failed: %s", err) + return + } + } + }) + if controlErr != nil { + return errors.Trace(controlErr) + } + return errors.Trace(err) + }, + } + + network := "udp4" + if ipAddr.To4() == nil { + network = "udp6" } - conn, err := newUDPConn(domain, config) + // It's necessary to create an unbound UDP socket, for use with WriteTo, + // as required by quic-go. As documented in net.ListenUDP: with an + // unspecified IP address, the resulting conn "listens on all available + // IP addresses of the local system except multicast IP addresses". + // + // Limitation: these UDP sockets are not necessarily closed when a device + // changes active network (e.g., WiFi to mobile). It's possible that a + // QUIC connection does not immediately close on a network change, and + // instead outbound packets are sent from a different active interface. + // As quic-go does not yet support connection migration, these packets + // will be dropped by the server. This situation is mitigated by network + // change event detection, which initiates new tunnel connections, and by + // timeouts/keep-alives. + + conn, err := listen.ListenPacket(ctx, network, "") if err != nil { return nil, nil, errors.Trace(err) } + udpConn, ok := conn.(*net.UDPConn) + if !ok { + return nil, nil, errors.Tracef("unexpected conn type: %T", conn) + } + if config.ResolvedIPCallback != nil { config.ResolvedIPCallback(ipAddr.String()) } - return conn, &net.UDPAddr{IP: ipAddr, Port: port}, nil + return udpConn, &net.UDPAddr{IP: ipAddr, Port: port}, nil } diff --git a/psiphon/UDPConn_bind.go b/psiphon/UDPConn_bind.go deleted file mode 100644 index 1ed994938..000000000 --- a/psiphon/UDPConn_bind.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build !windows - -/* - * Copyright (c) 2018, Psiphon Inc. - * All rights reserved. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- * - */ - -package psiphon - -import ( - "net" - "os" - "syscall" - - "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" -) - -func newUDPConn(domain int, config *DialConfig) (net.PacketConn, error) { - - // TODO: use https://golang.org/pkg/net/#Dialer.Control, introduced in Go 1.11? - - socketFD, err := syscall.Socket(domain, syscall.SOCK_DGRAM, 0) - if err != nil { - return nil, errors.Trace(err) - } - - syscall.CloseOnExec(socketFD) - - setAdditionalSocketOptions(socketFD) - - if config.DeviceBinder != nil { - _, err = config.DeviceBinder.BindToDevice(socketFD) - if err != nil { - syscall.Close(socketFD) - return nil, errors.Tracef("BindToDevice failed: %s", err) - } - } - - // Convert the socket fd to a net.PacketConn - // This code block is from: - // https://github.com/golang/go/issues/6966 - - file := os.NewFile(uintptr(socketFD), "") - conn, err := net.FilePacketConn(file) // net.FilePackateConn() dups socketFD - file.Close() // file.Close() closes socketFD - if err != nil { - return nil, errors.Trace(err) - } - - return conn, nil -} diff --git a/psiphon/UDPConn_nobind.go b/psiphon/UDPConn_nobind.go deleted file mode 100644 index bbbc040fc..000000000 --- a/psiphon/UDPConn_nobind.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build windows - -/* - * Copyright (c) 2018, Psiphon Inc. - * All rights reserved. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - */ - -package psiphon - -import ( - "net" - "syscall" - - "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" -) - -func newUDPConn(domain int, config *DialConfig) (net.PacketConn, error) { - - if config.DeviceBinder != nil { - return nil, errors.TraceNew("newUDPConn with DeviceBinder not supported on this platform") - } - - network := "udp4" - - if domain == syscall.AF_INET6 { - network = "udp6" - } - - return net.ListenUDP(network, nil) -} diff --git a/psiphon/common/quic/obfuscator.go b/psiphon/common/quic/obfuscator.go index 4148d3d8b..13f207413 100644 --- a/psiphon/common/quic/obfuscator.go +++ b/psiphon/common/quic/obfuscator.go @@ -102,7 +102,7 @@ const ( // payload will increase UDP packets beyond the QUIC max of 1280 bytes, // introducing some risk of fragmentation and/or dropped packets. type ObfuscatedPacketConn struct { - ietf_quic.OOBCapablePacketConn + net.PacketConn isServer bool isIETFClient bool isDecoyClient bool @@ -130,79 +130,74 @@ func (p *peerMode) isStale() bool { // NewObfuscatedPacketConn creates a new ObfuscatedPacketConn. func NewObfuscatedPacketConn( - conn net.PacketConn, + packetConn net.PacketConn, isServer bool, isIETFClient bool, isDecoyClient bool, obfuscationKey string, paddingSeed *prng.Seed) (*ObfuscatedPacketConn, error) { - oobPacketConn, ok := conn.(ietf_quic.OOBCapablePacketConn) - if !ok { - return nil, errors.TraceNew("conn must support OOBCapablePacketConn") - } - // There is no replay of obfuscation "encryption", just padding. 
nonceSeed, err := prng.NewSeed() if err != nil { return nil, errors.Trace(err) } - packetConn := &ObfuscatedPacketConn{ - OOBCapablePacketConn: oobPacketConn, - isServer: isServer, - isIETFClient: isIETFClient, - isDecoyClient: isDecoyClient, - peerModes: make(map[string]*peerMode), - noncePRNG: prng.NewPRNGWithSeed(nonceSeed), - paddingPRNG: prng.NewPRNGWithSeed(paddingSeed), + conn := &ObfuscatedPacketConn{ + PacketConn: packetConn, + isServer: isServer, + isIETFClient: isIETFClient, + isDecoyClient: isDecoyClient, + peerModes: make(map[string]*peerMode), + noncePRNG: prng.NewPRNGWithSeed(nonceSeed), + paddingPRNG: prng.NewPRNGWithSeed(paddingSeed), } secret := []byte(obfuscationKey) salt := []byte("quic-obfuscation-key") _, err = io.ReadFull( - hkdf.New(sha256.New, secret, salt, nil), packetConn.obfuscationKey[:]) + hkdf.New(sha256.New, secret, salt, nil), conn.obfuscationKey[:]) if err != nil { return nil, errors.Trace(err) } if isDecoyClient { - packetConn.decoyPacketCount = int32(packetConn.paddingPRNG.Range( + conn.decoyPacketCount = int32(conn.paddingPRNG.Range( MIN_DECOY_PACKETS, MAX_DECOY_PACKETS)) - packetConn.decoyBuffer = make([]byte, MAX_PACKET_SIZE) + conn.decoyBuffer = make([]byte, MAX_PACKET_SIZE) } if isServer { - packetConn.runWaitGroup = new(sync.WaitGroup) - packetConn.stopBroadcast = make(chan struct{}) + conn.runWaitGroup = new(sync.WaitGroup) + conn.stopBroadcast = make(chan struct{}) // Reap stale peer mode information to reclaim memory. - packetConn.runWaitGroup.Add(1) + conn.runWaitGroup.Add(1) go func() { - defer packetConn.runWaitGroup.Done() + defer conn.runWaitGroup.Done() ticker := time.NewTicker(SERVER_IDLE_TIMEOUT / 2) defer ticker.Stop() for { select { case <-ticker.C: - packetConn.peerModesMutex.Lock() - for address, mode := range packetConn.peerModes { + conn.peerModesMutex.Lock() + for address, mode := range conn.peerModes { if mode.isStale() { - delete(packetConn.peerModes, address) + delete(conn.peerModes, address) } } - packetConn.peerModesMutex.Unlock() - case <-packetConn.stopBroadcast: + conn.peerModesMutex.Unlock() + case <-conn.stopBroadcast: return } } }() } - return packetConn, nil + return conn, nil } func (conn *ObfuscatedPacketConn) Close() error { @@ -217,7 +212,7 @@ func (conn *ObfuscatedPacketConn) Close() error { conn.runWaitGroup.Wait() } - return conn.OOBCapablePacketConn.Close() + return conn.PacketConn.Close() } type temporaryNetError struct { @@ -242,7 +237,7 @@ func (e *temporaryNetError) Error() string { func (conn *ObfuscatedPacketConn) ReadFrom(p []byte) (int, net.Addr, error) { n, _, _, addr, _, err := conn.readPacketWithType(p, nil) - // Do not wrap any I/O err returned by conn.OOBCapablePacketConn + // Do not wrap any I/O err returned by conn.PacketConn return n, addr, err } @@ -252,7 +247,7 @@ func (conn *ObfuscatedPacketConn) WriteTo(p []byte, addr net.Addr) (int, error) return 0, errors.TraceNew("unexpected addr type") } n, _, err := conn.writePacket(p, nil, udpAddr) - // Do not wrap any I/O err returned by conn.OOBCapablePacketConn + // Do not wrap any I/O err returned by conn.PacketConn return n, err } @@ -273,13 +268,13 @@ func (conn *ObfuscatedPacketConn) WriteTo(p []byte, addr net.Addr) (int, error) func (conn *ObfuscatedPacketConn) ReadMsgUDP(p, oob []byte) (int, int, int, *net.UDPAddr, error) { n, oobn, flags, addr, _, err := conn.readPacketWithType(p, nil) - // Do not wrap any I/O err returned by conn.OOBCapablePacketConn + // Do not wrap any I/O err returned by conn.PacketConn return n, oobn, flags, addr, err } 
func (conn *ObfuscatedPacketConn) WriteMsgUDP(p, oob []byte, addr *net.UDPAddr) (int, int, error) { n, oobn, err := conn.writePacket(p, oob, addr) - // Do not wrap any I/O err returned by conn.OOBCapablePacketConn + // Do not wrap any I/O err returned by conn.PacketConn return n, oobn, err } @@ -298,7 +293,7 @@ func (conn *ObfuscatedPacketConn) ReadBatch(ms []ipv4.Message, _ int) (int, erro ms[0].N, ms[0].NN, ms[0].Flags, ms[0].Addr, _, err = conn.readPacketWithType(ms[0].Buffers[0], ms[0].OOB) if err != nil { - // Do not wrap any I/O err returned by conn.OOBCapablePacketConn + // Do not wrap any I/O err returned by conn.PacketConn return 0, err } return 1, nil @@ -363,7 +358,7 @@ func (conn *ObfuscatedPacketConn) readPacketWithType( continue } - // Do not wrap any I/O err returned by conn.OOBCapablePacketConn + // Do not wrap any I/O err returned by conn.PacketConn return n, oobn, flags, addr, isIETF, err } } @@ -371,7 +366,26 @@ func (conn *ObfuscatedPacketConn) readPacketWithType( func (conn *ObfuscatedPacketConn) readPacket( p, oob []byte) (int, int, int, *net.UDPAddr, bool, error) { - n, oobn, flags, addr, err := conn.OOBCapablePacketConn.ReadMsgUDP(p, oob) + var n, oobn, flags int + var addr *net.UDPAddr + var err error + + oobCapablePacketConn, ok := conn.PacketConn.(ietf_quic.OOBCapablePacketConn) + if ok { + // Read OOB ECN bits when supported by the packet conn. + n, oobn, flags, addr, err = oobCapablePacketConn.ReadMsgUDP(p, oob) + } else { + // Fall back to a generic ReadFrom, supported by any packet conn. + var netAddr net.Addr + n, netAddr, err = conn.PacketConn.ReadFrom(p) + if netAddr != nil { + // Directly convert from net.Addr to *net.UDPAddr, if possible. + addr, ok = netAddr.(*net.UDPAddr) + if !ok { + addr, err = net.ResolveUDPAddr("udp", netAddr.String()) + } + } + } // Data is processed even when err != nil, as ReadFrom may return both // a packet and an error, such as io.EOF. @@ -556,7 +570,7 @@ func (conn *ObfuscatedPacketConn) readPacket( } } - // Do not wrap any I/O err returned by conn.OOBCapablePacketConn + // Do not wrap any I/O err returned by conn.PacketConn return n, oobn, flags, addr, isIETF, err } @@ -648,15 +662,42 @@ func (conn *ObfuscatedPacketConn) writePacket( } } - _, oobn, err := conn.OOBCapablePacketConn.WriteMsgUDP(p, oob, addr) + var oobn int + var err error + + oobCapablePacketConn, ok := conn.PacketConn.(ietf_quic.OOBCapablePacketConn) + if ok { + + // Write OOB bits if supported by the packet conn. + // + // At this time, quic-go reads but does not write ECN OOB bits. On the + // client-side, the Dial function arranges for conn.PacketConn to not + // implement OOBCapablePacketConn when using obfuscated QUIC, and so + // quic-go is not expected to write ECN bits -- a potential + // obfuscation fingerprint -- in the future, on the client-side. + // + // Limitation: on the server-side, the single UDP server socket is + // wrapped with ObfuscatedPacketConn and supports both obfuscated and + // regular QUIC; as it stands, this logic will support writing ECN + // bits for both obfuscated and regular QUIC. + + _, oobn, err = oobCapablePacketConn.WriteMsgUDP(p, oob, addr) + + } else { + + // Fall back to WriteTo, supported by any packet conn. If there are + // OOB bits to be written, fail. + + if oob != nil { + return 0, 0, errors.TraceNew("unexpected OOB payload for non-OOBCapablePacketConn") + } + _, err = conn.PacketConn.WriteTo(p, addr) + } - // quic-go uses OOB to manipulate ECN bits in the IP header; these are not - // obfuscated. 
- // // Return n = len(input p) bytes written even when p is an obfuscated // buffer and longer than the input p. - // Do not wrap any I/O err returned by conn.OOBCapablePacketConn + // Do not wrap any I/O err returned by conn.PacketConn return n, oobn, err } diff --git a/psiphon/common/quic/quic.go b/psiphon/common/quic/quic.go index b694c2d43..656181cd8 100644 --- a/psiphon/common/quic/quic.go +++ b/psiphon/common/quic/quic.go @@ -21,7 +21,6 @@ */ /* - Package quic wraps github.com/lucas-clemente/quic-go with net.Listener and net.Conn types that provide a drop-in replacement for net.TCPConn. @@ -39,7 +38,6 @@ Conns mask or translate qerr.PeerGoingAway to io.EOF as appropriate. QUIC idle timeouts and keep alives are tuned to mitigate aggressive UDP NAT timeouts on mobile data networks while accounting for the fact that mobile devices in standby/sleep may not be able to initiate the keep alive. - */ package quic @@ -50,7 +48,6 @@ import ( "io" "net" "net/http" - "os" "sync" "sync/atomic" "syscall" @@ -347,8 +344,8 @@ func (listener *Listener) Close() error { // may be cancelled by ctx; packetConn will be closed if the dial is // cancelled or fails. // -// Keep alive and idle timeout functionality in QUIC is disabled as these -// aspects are expected to be handled at a higher level. +// When packetConn is a *net.UDPConn, QUIC ECN bit operations are supported, +// unless the specified QUIC version is obfuscated. func Dial( ctx context.Context, packetConn net.PacketConn, @@ -386,14 +383,38 @@ func Dial( return nil, errors.Tracef("invalid destination port: %d", remoteAddr.Port) } - udpConn, ok := packetConn.(udpConn) - if !ok { - return nil, errors.TraceNew("packetConn must implement net.UDPConn functions") - } + udpConn, ok := packetConn.(*net.UDPConn) - // Ensure blocked packet writes eventually timeout. - packetConn = &writeTimeoutUDPConn{ - udpConn: udpConn, + if !ok || isObfuscated(quicVersion) { + + // quic-go uses OOB operations to manipulate ECN bits in IP packet + // headers. These operations are available only when the packet conn + // is a *net.UDPConn. At this time, quic-go reads but does not write + // ECN OOB bits; see quic-go PR 2789. + // + // To guard against future writes to ECN bits, a potential fingerprint + // when using obfuscated QUIC, this non-OOB code path is taken for + // isObfuscated QUIC versions. This mitigates upstream fingerprints; + // see ObfuscatedPacketConn.writePacket for the server-side + // downstream limitation. + + // Ensure blocked packet writes eventually timeout. + packetConn = &writeTimeoutPacketConn{ + PacketConn: packetConn, + } + + // Double check that OOB support won't be detected by quic-go. + _, ok := packetConn.(ietf_quic.OOBCapablePacketConn) + if ok { + return nil, errors.TraceNew("unexpected OOBCapablePacketConn") + } + + } else { + + // Ensure blocked packet writes eventually timeout. + packetConn = &writeTimeoutUDPConn{ + UDPConn: udpConn, + } } maxPacketSizeAdjustment := 0 @@ -443,6 +464,7 @@ func Dial( connection, err := dialQUIC( ctx, packetConn, + false, remoteAddr, quicSNIAddress, versionNumber, @@ -501,35 +523,6 @@ func Dial( return conn, nil } -// udpConn matches net.UDPConn, which implements both net.Conn and -// net.PacketConn. udpConn enables handling of Dial packetConn inputs that -// are not concrete *net.UDPConn types but which still implement all the -// required functions. A udpConn instance can be passed to quic-go; various -// quic-go code paths check that the input conn implements net.Conn and/or -// net.PacketConn. 
-// -// TODO: add *AddrPort functions introduced in Go 1.18 -type udpConn interface { - Close() error - File() (f *os.File, err error) - LocalAddr() net.Addr - Read(b []byte) (int, error) - ReadFrom(b []byte) (int, net.Addr, error) - ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) - ReadMsgUDP(b, oob []byte) (n, oobn, flags int, addr *net.UDPAddr, err error) - RemoteAddr() net.Addr - SetDeadline(t time.Time) error - SetReadBuffer(bytes int) error - SetReadDeadline(t time.Time) error - SetWriteBuffer(bytes int) error - SetWriteDeadline(t time.Time) error - SyscallConn() (syscall.RawConn, error) - Write(b []byte) (int, error) - WriteMsgUDP(b, oob []byte, addr *net.UDPAddr) (n, oobn int, err error) - WriteTo(b []byte, addr net.Addr) (int, error) - WriteToUDP(b []byte, addr *net.UDPAddr) (int, error) -} - // writeTimeoutUDPConn sets write deadlines before each UDP packet write. // // Generally, a UDP packet write doesn't block. However, Go's @@ -543,7 +536,7 @@ type udpConn interface { // Note that quic-go manages read deadlines; we set only the write deadline // here. type writeTimeoutUDPConn struct { - udpConn + *net.UDPConn } func (conn *writeTimeoutUDPConn) Write(b []byte) (int, error) { @@ -554,7 +547,7 @@ func (conn *writeTimeoutUDPConn) Write(b []byte) (int, error) { } // Do not wrap any I/O err returned by udpConn - return conn.udpConn.Write(b) + return conn.UDPConn.Write(b) } func (conn *writeTimeoutUDPConn) WriteMsgUDP(b, oob []byte, addr *net.UDPAddr) (int, int, error) { @@ -565,7 +558,7 @@ func (conn *writeTimeoutUDPConn) WriteMsgUDP(b, oob []byte, addr *net.UDPAddr) ( } // Do not wrap any I/O err returned by udpConn - return conn.udpConn.WriteMsgUDP(b, oob, addr) + return conn.UDPConn.WriteMsgUDP(b, oob, addr) } func (conn *writeTimeoutUDPConn) WriteTo(b []byte, addr net.Addr) (int, error) { @@ -576,7 +569,7 @@ func (conn *writeTimeoutUDPConn) WriteTo(b []byte, addr net.Addr) (int, error) { } // Do not wrap any I/O err returned by udpConn - return conn.udpConn.WriteTo(b, addr) + return conn.UDPConn.WriteTo(b, addr) } func (conn *writeTimeoutUDPConn) WriteToUDP(b []byte, addr *net.UDPAddr) (int, error) { @@ -587,7 +580,24 @@ func (conn *writeTimeoutUDPConn) WriteToUDP(b []byte, addr *net.UDPAddr) (int, e } // Do not wrap any I/O err returned by udpConn - return conn.udpConn.WriteToUDP(b, addr) + return conn.UDPConn.WriteToUDP(b, addr) +} + +// writeTimeoutPacketConn is the equivilent of writeTimeoutUDPConn for +// non-*net.UDPConns. +type writeTimeoutPacketConn struct { + net.PacketConn +} + +func (conn *writeTimeoutPacketConn) WriteTo(b []byte, addr net.Addr) (int, error) { + + err := conn.SetWriteDeadline(time.Now().Add(UDP_PACKET_WRITE_TIMEOUT)) + if err != nil { + return 0, errors.Trace(err) + } + + // Do not wrap any I/O err returned by udpConn + return conn.PacketConn.WriteTo(b, addr) } // Conn is a net.Conn and psiphon/common.Closer. @@ -864,9 +874,21 @@ func (t *QUICTransporter) dialQUIC() (retConnection quicConnection, retErr error return nil, errors.Trace(err) } + // Check for a *net.UDPConn, as expected, to support OOB operations. + udpConn, ok := packetConn.(*net.UDPConn) + if !ok { + return nil, errors.Tracef("unexpected packetConn type: %T", packetConn) + } + + // Ensure blocked packet writes eventually timeout. 
+ packetConn = &writeTimeoutUDPConn{ + UDPConn: udpConn, + } + connection, err := dialQUIC( ctx, packetConn, + true, remoteAddr, t.quicSNIAddress, versionNumber, @@ -994,6 +1016,7 @@ func (c *ietfQUICConnection) isErrorIndicatingClosed(err error) bool { func dialQUIC( ctx context.Context, packetConn net.PacketConn, + expectNetUDPConn bool, remoteAddr *net.UDPAddr, quicSNIAddress string, versionNumber uint32, @@ -1153,7 +1176,7 @@ func (conn *muxPacketConn) SetWriteDeadline(t time.Time) error { // https://godoc.org/github.com/lucas-clemente/quic-go#ECNCapablePacketConn. func (conn *muxPacketConn) SetReadBuffer(bytes int) error { - c, ok := conn.listener.conn.OOBCapablePacketConn.(interface { + c, ok := conn.listener.conn.PacketConn.(interface { SetReadBuffer(int) error }) if !ok { @@ -1163,7 +1186,7 @@ func (conn *muxPacketConn) SetReadBuffer(bytes int) error { } func (conn *muxPacketConn) SyscallConn() (syscall.RawConn, error) { - c, ok := conn.listener.conn.OOBCapablePacketConn.(interface { + c, ok := conn.listener.conn.PacketConn.(interface { SyscallConn() (syscall.RawConn, error) }) if !ok { From 407c2d05c5872919c6531328002fde30d91e470e Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Tue, 24 Jan 2023 11:21:12 -0500 Subject: [PATCH 02/44] Fix error check to work with quic-go v0.31.1 --- psiphon/common/quic/quic.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/psiphon/common/quic/quic.go b/psiphon/common/quic/quic.go index 656181cd8..d90fc91b7 100644 --- a/psiphon/common/quic/quic.go +++ b/psiphon/common/quic/quic.go @@ -48,6 +48,7 @@ import ( "io" "net" "net/http" + "strings" "sync" "sync/atomic" "syscall" @@ -1007,9 +1008,9 @@ func (c *ietfQUICConnection) isErrorIndicatingClosed(err error) bool { return false } errStr := err.Error() - // The target errors are of type qerr.ApplicationError and - // qerr.IdleTimeoutError, but these are not exported by quic-go. - return errStr == "Application error 0x0" || + // The target errors are of type qerr.ApplicationError[Code] and + // qerr.IdleTimeoutError, but these are not both exported by quic-go. 
+ return strings.HasPrefix(errStr, "Application error 0x0") || errStr == "timeout: no recent network activity" } From 71135c831e948e5901914121ad595f5c6623da65 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Tue, 24 Jan 2023 12:13:10 -0500 Subject: [PATCH 03/44] Update vendored Psiphon-Labs/quic-go - Update to upstream quic-go v0.31.1 - Enable Go 1.20 support --- go.mod | 8 +- go.sum | 9 + .../github.com/Psiphon-Labs/quic-go/README.md | 3 +- .../github.com/Psiphon-Labs/quic-go/client.go | 12 +- .../Psiphon-Labs/quic-go/conn_id_generator.go | 12 +- .../Psiphon-Labs/quic-go/conn_id_manager.go | 2 +- .../Psiphon-Labs/quic-go/connection.go | 366 ++++++++++++------ .../Psiphon-Labs/quic-go/datagram_queue.go | 20 +- .../Psiphon-Labs/quic-go/http3/body.go | 3 + .../Psiphon-Labs/quic-go/http3/capsule.go | 55 +++ .../Psiphon-Labs/quic-go/http3/client.go | 10 +- .../Psiphon-Labs/quic-go/http3/frames.go | 27 +- .../Psiphon-Labs/quic-go/http3/http_stream.go | 15 +- .../quic-go/http3/request_writer.go | 7 +- .../quic-go/http3/response_writer.go | 14 +- .../Psiphon-Labs/quic-go/http3/server.go | 59 ++- .../Psiphon-Labs/quic-go/interface.go | 26 +- .../quic-go/internal/ackhandler/interfaces.go | 20 - .../quic-go/internal/ackhandler/packet.go | 49 +++ .../ackhandler/received_packet_handler.go | 12 +- .../ackhandler/received_packet_history.go | 17 +- .../ackhandler/received_packet_tracker.go | 28 +- .../ackhandler/sent_packet_handler.go | 38 +- .../ackhandler/sent_packet_history.go | 70 ++-- .../flowcontrol/base_flow_controller.go | 2 +- .../internal/handshake/crypto_setup.go | 11 +- .../internal/handshake/initial_aead.go | 2 +- .../internal/handshake/session_ticket.go | 9 +- .../internal/handshake/token_generator.go | 8 +- .../quic-go/internal/logutils/frame.go | 17 + .../internal/protocol/connection_id.go | 85 ++-- .../quic-go/internal/protocol/params.go | 2 +- .../quic-go/internal/qerr/error_codes.go | 2 +- .../quic-go/internal/qerr/errors.go | 17 +- .../quic-go/internal/qtls/go119.go | 2 +- .../quic-go/internal/qtls/go120.go | 107 ++++- .../quic-go/internal/utils/byteorder.go | 4 + .../internal/utils/byteorder_big_endian.go | 14 + .../quic-go/internal/wire/ack_frame.go | 32 +- .../quic-go/internal/wire/ack_frame_pool.go | 24 ++ .../internal/wire/connection_close_frame.go | 16 +- .../quic-go/internal/wire/crypto_frame.go | 12 +- .../internal/wire/data_blocked_frame.go | 9 +- .../quic-go/internal/wire/datagram_frame.go | 12 +- .../quic-go/internal/wire/frame_parser.go | 16 +- .../internal/wire/handshake_done_frame.go | 5 +- .../quic-go/internal/wire/header.go | 62 ++- .../quic-go/internal/wire/interface.go | 6 +- .../quic-go/internal/wire/max_data_frame.go | 10 +- .../internal/wire/max_stream_data_frame.go | 10 +- .../internal/wire/max_streams_frame.go | 10 +- .../internal/wire/new_connection_id_frame.go | 21 +- .../quic-go/internal/wire/new_token_frame.go | 10 +- .../internal/wire/path_challenge_frame.go | 8 +- .../internal/wire/path_response_frame.go | 8 +- .../quic-go/internal/wire/ping_frame.go | 7 +- .../internal/wire/reset_stream_frame.go | 12 +- .../wire/retire_connection_id_frame.go | 8 +- .../quic-go/internal/wire/short_header.go | 55 +++ .../internal/wire/stop_sending_frame.go | 10 +- .../wire/stream_data_blocked_frame.go | 10 +- .../quic-go/internal/wire/stream_frame.go | 28 +- .../internal/wire/streams_blocked_frame.go | 10 +- .../internal/wire/transport_parameters.go | 166 ++++---- .../internal/wire/version_negotiation.go | 35 +- .../Psiphon-Labs/quic-go/logging/interface.go | 20 +- 
.../Psiphon-Labs/quic-go/logging/mockgen.go | 4 +- .../Psiphon-Labs/quic-go/logging/multiplex.go | 24 +- .../quic-go/logging/null_tracer.go | 31 +- .../Psiphon-Labs/quic-go/mockgen.go | 4 +- .../Psiphon-Labs/quic-go/mockgen_private.sh | 2 +- .../Psiphon-Labs/quic-go/multiplexer.go | 11 +- .../quic-go/packet_handler_map.go | 38 +- .../Psiphon-Labs/quic-go/packet_packer.go | 291 ++++++-------- .../Psiphon-Labs/quic-go/packet_unpacker.go | 106 +++-- .../Psiphon-Labs/quic-go/quicvarint/varint.go | 19 + .../Psiphon-Labs/quic-go/send_queue.go | 7 + .../Psiphon-Labs/quic-go/send_stream.go | 2 +- .../github.com/Psiphon-Labs/quic-go/server.go | 94 +++-- .../Psiphon-Labs/quic-go/sys_conn_oob.go | 50 ++- .../github.com/Psiphon-Labs/quic-go/tools.go | 3 +- .../github.com/marten-seemann/qpack/README.md | 3 +- .../marten-seemann/qpack/encoder.go | 4 +- .../marten-seemann/qpack/static_table.go | 1 + vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 14 + vendor/golang.org/x/sys/unix/syscall_linux.go | 1 + vendor/gopkg.in/yaml.v3/.travis.yml | 16 - vendor/gopkg.in/yaml.v3/apic.go | 1 + vendor/gopkg.in/yaml.v3/decode.go | 143 +++++-- vendor/gopkg.in/yaml.v3/emitterc.go | 58 ++- vendor/gopkg.in/yaml.v3/encode.go | 30 +- vendor/gopkg.in/yaml.v3/parserc.go | 57 ++- vendor/gopkg.in/yaml.v3/scannerc.go | 49 ++- vendor/gopkg.in/yaml.v3/yaml.go | 40 +- vendor/gopkg.in/yaml.v3/yamlh.go | 2 + vendor/modules.txt | 10 +- 96 files changed, 1885 insertions(+), 1026 deletions(-) create mode 100644 vendor/github.com/Psiphon-Labs/quic-go/http3/capsule.go create mode 100644 vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/packet.go create mode 100644 vendor/github.com/Psiphon-Labs/quic-go/internal/wire/ack_frame_pool.go create mode 100644 vendor/github.com/Psiphon-Labs/quic-go/internal/wire/short_header.go delete mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml diff --git a/go.mod b/go.mod index d6304b673..8b211c6b7 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/Psiphon-Inc/rotate-safe-writer v0.0.0-20210303140923-464a7a37606e github.com/Psiphon-Labs/bolt v0.0.0-20200624191537-23cedaef7ad7 github.com/Psiphon-Labs/goptlib v0.0.0-20200406165125-c0e32a7a3464 - github.com/Psiphon-Labs/quic-go v0.0.0-20221014165902-1b7c3975fcf3 + github.com/Psiphon-Labs/quic-go v0.0.0-20230124165616-fe8e9a215a66 github.com/Psiphon-Labs/tls-tris v0.0.0-20210713133851-676a693d51ad github.com/armon/go-proxyproto v0.0.0-20180202201750-5b7edb60ff5f github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61 @@ -51,7 +51,7 @@ require ( golang.org/x/crypto v0.0.0-20221012134737-56aed061732a golang.org/x/net v0.0.0-20221014081412-f15817d10f9b golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 - golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43 + golang.org/x/sys v0.2.0 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 ) @@ -73,7 +73,7 @@ require ( github.com/josharian/native v1.0.0 // indirect github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect github.com/klauspost/compress v1.15.10-0.20220729101446-5a3a4a965cc6 // indirect - github.com/marten-seemann/qpack v0.2.1 // indirect + github.com/marten-seemann/qpack v0.3.0 // indirect github.com/mdlayher/netlink v1.4.2-0.20210930205308-a81a8c23d40a // indirect github.com/mdlayher/socket v0.0.0-20210624160740-9dbe287ded84 // indirect github.com/mroth/weightedrand v0.4.1 // indirect @@ -87,6 +87,6 @@ require ( golang.org/x/text v0.3.7 // indirect golang.org/x/tools v0.1.12 // indirect google.golang.org/protobuf v1.28.0 // indirect - gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.2.1 // indirect ) diff --git a/go.sum b/go.sum index 2b1ea9288..44368fee0 100644 --- a/go.sum +++ b/go.sum @@ -22,6 +22,8 @@ github.com/Psiphon-Labs/quic-go v0.0.0-20221012142746-f3b75b440582 h1:T8I7L8tHDI github.com/Psiphon-Labs/quic-go v0.0.0-20221012142746-f3b75b440582/go.mod h1:Dcp6EsbioehAvUSMTMbqFV6Z+q+IApml2Q3r8eXQS5M= github.com/Psiphon-Labs/quic-go v0.0.0-20221014165902-1b7c3975fcf3 h1:BKSZdSkhOGV23vBl4cYPabzkRlWk5Zm7Snpo4i0lSXQ= github.com/Psiphon-Labs/quic-go v0.0.0-20221014165902-1b7c3975fcf3/go.mod h1:llhtSl7dUXTssUN4m4MjUDJrULGNxgZBMKYjExuk6EM= +github.com/Psiphon-Labs/quic-go v0.0.0-20230124165616-fe8e9a215a66 h1:xTGnmXqEEUphnohYLGwGrlVzWPNpTNAFViJYxQUMHRU= +github.com/Psiphon-Labs/quic-go v0.0.0-20230124165616-fe8e9a215a66/go.mod h1:cu4yhfHkyt+uQ9FFFjTpjCjcQYf52ntEAyoV4Zg0+fg= github.com/Psiphon-Labs/tls-tris v0.0.0-20210713133851-676a693d51ad h1:m6HS84+b5xDPLj7D/ya1CeixyaHOCZoMbBilJ48y+Ts= github.com/Psiphon-Labs/tls-tris v0.0.0-20210713133851-676a693d51ad/go.mod h1:v3y9GXFo9Sf2mO6auD2ExGG7oDgrK8TI7eb49ZnUxrE= github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 h1:w1UutsfOrms1J05zt7ISrnJIXKzwaspym5BTKGx93EI= @@ -131,6 +133,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/marten-seemann/qpack v0.2.1 h1:jvTsT/HpCn2UZJdP+UUB53FfUUgeOyG5K1ns0OJOGVs= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qpack v0.3.0 h1:UiWstOgT8+znlkDPOg2+3rIuYXJ2CnGDkGUXN6ki6hE= +github.com/marten-seemann/qpack v0.3.0/go.mod h1:cGfKPBiP4a9EQdxCwEwI/GEeWAsjSekBvx/X8mh58+g= github.com/marusama/semaphore v0.0.0-20171214154724-565ffd8e868a h1:6SRny9FLB1eWasPyDUqBQnMi9NhXU01XIlB0ao89YoI= github.com/marusama/semaphore v0.0.0-20171214154724-565ffd8e868a/go.mod h1:TmeOqAKoDinfPfSohs14CO3VcEf7o+Bem6JiNe05yrQ= github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43 h1:WgyLFv10Ov49JAQI/ZLUkCZ7VJS3r74hwFIGXJsgZlY= @@ -170,6 +174,7 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= github.com/oschwald/maxminddb-golang v1.2.1-0.20170901134056-26fe5ace1c70 h1:2skBNJsk5emoC/p8tOS9DQL5Ia7ND1UNEdmgfvP+mPI= github.com/oschwald/maxminddb-golang v1.2.1-0.20170901134056-26fe5ace1c70/go.mod h1:3jhIUymTJ5VREKyIhWm66LJiQt04F0UCDdodShpjWsY= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= @@ -296,6 +301,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9w golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43 h1:OK7RB6t2WQX54srQQYSXMW8dF5C6/8+oA/s5QBmmto4= golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -338,5 +345,7 @@ gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.2.1 h1:/EPr//+UMMXwMTkXvCCoaJDq8cpjMO80Ou+L4PDo2mY= honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= diff --git a/vendor/github.com/Psiphon-Labs/quic-go/README.md b/vendor/github.com/Psiphon-Labs/quic-go/README.md index f33ede45d..5efb4f436 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/README.md +++ b/vendor/github.com/Psiphon-Labs/quic-go/README.md @@ -5,7 +5,8 @@ [![PkgGoDev](https://pkg.go.dev/badge/github.com/lucas-clemente/quic-go)](https://pkg.go.dev/github.com/lucas-clemente/quic-go) [![Code Coverage](https://img.shields.io/codecov/c/github/lucas-clemente/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/lucas-clemente/quic-go/) -quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go, including the Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)). It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)). +quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go, including the Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)) and Datagram Packetization Layer Path MTU + Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899)). It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)). In addition to the RFCs listed above, it currently implements the [IETF QUIC draft-29](https://tools.ietf.org/html/draft-ietf-quic-transport-29). Support for draft-29 will eventually be dropped, as it is phased out of the ecosystem. 
diff --git a/vendor/github.com/Psiphon-Labs/quic-go/client.go b/vendor/github.com/Psiphon-Labs/quic-go/client.go index e6c03499b..a44ae1dc8 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/client.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/client.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "net" - "strings" "github.com/Psiphon-Labs/quic-go/internal/protocol" "github.com/Psiphon-Labs/quic-go/internal/utils" @@ -232,13 +231,10 @@ func newClient( tlsConf = tlsConf.Clone() } if tlsConf.ServerName == "" { - sni := host - if strings.IndexByte(sni, ':') != -1 { - var err error - sni, _, err = net.SplitHostPort(sni) - if err != nil { - return nil, err - } + sni, _, err := net.SplitHostPort(host) + if err != nil { + // It's ok if net.SplitHostPort returns an error - it could be a hostname/IP address without a port. + sni = host } tlsConf.ServerName = sni diff --git a/vendor/github.com/Psiphon-Labs/quic-go/conn_id_generator.go b/vendor/github.com/Psiphon-Labs/quic-go/conn_id_generator.go index fb367a794..587f54eb2 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/conn_id_generator.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/conn_id_generator.go @@ -14,7 +14,7 @@ type connIDGenerator struct { highestSeq uint64 activeSrcConnIDs map[uint64]protocol.ConnectionID - initialClientDestConnID protocol.ConnectionID + initialClientDestConnID *protocol.ConnectionID // nil for the client addConnectionID func(protocol.ConnectionID) getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken @@ -28,7 +28,7 @@ type connIDGenerator struct { func newConnIDGenerator( initialConnectionID protocol.ConnectionID, - initialClientDestConnID protocol.ConnectionID, // nil for the client + initialClientDestConnID *protocol.ConnectionID, // nil for the client addConnectionID func(protocol.ConnectionID), getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken, removeConnectionID func(protocol.ConnectionID), @@ -84,7 +84,7 @@ func (m *connIDGenerator) Retire(seq uint64, sentWithDestConnID protocol.Connect if !ok { return nil } - if connID.Equal(sentWithDestConnID) { + if connID == sentWithDestConnID { return &qerr.TransportError{ ErrorCode: qerr.ProtocolViolation, ErrorMessage: fmt.Sprintf("retired connection ID %d (%s), which was used as the Destination Connection ID on this packet", seq, connID), @@ -117,14 +117,14 @@ func (m *connIDGenerator) issueNewConnID() error { func (m *connIDGenerator) SetHandshakeComplete() { if m.initialClientDestConnID != nil { - m.retireConnectionID(m.initialClientDestConnID) + m.retireConnectionID(*m.initialClientDestConnID) m.initialClientDestConnID = nil } } func (m *connIDGenerator) RemoveAll() { if m.initialClientDestConnID != nil { - m.removeConnectionID(m.initialClientDestConnID) + m.removeConnectionID(*m.initialClientDestConnID) } for _, connID := range m.activeSrcConnIDs { m.removeConnectionID(connID) @@ -134,7 +134,7 @@ func (m *connIDGenerator) RemoveAll() { func (m *connIDGenerator) ReplaceWithClosed(pers protocol.Perspective, connClose []byte) { connIDs := make([]protocol.ConnectionID, 0, len(m.activeSrcConnIDs)+1) if m.initialClientDestConnID != nil { - connIDs = append(connIDs, m.initialClientDestConnID) + connIDs = append(connIDs, *m.initialClientDestConnID) } for _, connID := range m.activeSrcConnIDs { connIDs = append(connIDs, connID) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/conn_id_manager.go b/vendor/github.com/Psiphon-Labs/quic-go/conn_id_manager.go index b9e990e15..25ef13990 100644 --- 
a/vendor/github.com/Psiphon-Labs/quic-go/conn_id_manager.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/conn_id_manager.go @@ -121,7 +121,7 @@ func (h *connIDManager) addConnectionID(seq uint64, connID protocol.ConnectionID // insert a new element somewhere in the middle for el := h.queue.Front(); el != nil; el = el.Next() { if el.Value.SequenceNumber == seq { - if !el.Value.ConnectionID.Equal(connID) { + if el.Value.ConnectionID != connID { return fmt.Errorf("received conflicting connection IDs for sequence number %d", seq) } if el.Value.StatelessResetToken != resetToken { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/connection.go b/vendor/github.com/Psiphon-Labs/quic-go/connection.go index 944a85abc..5fd81867f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/connection.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/connection.go @@ -25,7 +25,8 @@ import ( ) type unpacker interface { - Unpack(hdr *wire.Header, rcvTime time.Time, data []byte) (*unpackedPacket, error) + UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte) (*unpackedPacket, error) + UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) } type streamGetter interface { @@ -260,7 +261,7 @@ var newConnection = func( logger: logger, version: v, } - if origDestConnID != nil { + if origDestConnID.Len() > 0 { s.logID = origDestConnID.String() } else { s.logID = destConnID.String() @@ -273,7 +274,7 @@ var newConnection = func( ) s.connIDGenerator = newConnIDGenerator( srcConnID, - clientDestConnID, + &clientDestConnID, func(connID protocol.ConnectionID) { runner.Add(connID, s) }, runner.GetStatelessResetToken, runner.Remove, @@ -372,7 +373,7 @@ var newConnection = func( s.perspective, s.version, ) - s.unpacker = newPacketUnpacker(cs, s.version) + s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen, s.version) s.cryptoStreamManager = newCryptoStreamManager(cs, initialStream, handshakeStream, s.oneRTTStream) return s } @@ -492,7 +493,7 @@ var newClientConnection = func( s.clientHelloWritten = clientHelloWritten s.cryptoStreamHandler = cs s.cryptoStreamManager = newCryptoStreamManager(cs, initialStream, handshakeStream, newCryptoStream()) - s.unpacker = newPacketUnpacker(cs, s.version) + s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen, s.version) s.packer = newPacketPacker( srcConnID, s.connIDManager.Get, @@ -760,6 +761,7 @@ func (s *connection) ConnectionState() ConnectionState { return ConnectionState{ TLS: s.cryptoStreamHandler.ConnectionState(), SupportsDatagrams: s.supportsDatagrams(), + Version: s.version, } } @@ -890,61 +892,133 @@ func (s *connection) handlePacketImpl(rp *receivedPacket) bool { data := rp.data p := rp for len(data) > 0 { + var destConnID protocol.ConnectionID if counter > 0 { p = p.Clone() p.data = data - } - hdr, packetData, rest, err := wire.ParsePacket(p.data, s.srcConnIDLen) - if err != nil { - if s.tracer != nil { - dropReason := logging.PacketDropHeaderParseError - if err == wire.ErrUnsupportedVersion { - dropReason = logging.PacketDropUnsupportedVersion + var err error + destConnID, err = wire.ParseConnectionID(p.data, s.srcConnIDLen) + if err != nil { + if s.tracer != nil { + s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.ByteCount(len(data)), logging.PacketDropHeaderParseError) } - s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.ByteCount(len(data)), dropReason) + s.logger.Debugf("error parsing packet, couldn't parse connection ID: %s", err) + break + } + if 
destConnID != lastConnID { + if s.tracer != nil { + s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.ByteCount(len(data)), logging.PacketDropUnknownConnectionID) + } + s.logger.Debugf("coalesced packet has different destination connection ID: %s, expected %s", destConnID, lastConnID) + break } - s.logger.Debugf("error parsing packet: %s", err) - break } - if hdr.IsLongHeader && hdr.Version != s.version { - if s.tracer != nil { - s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), protocol.ByteCount(len(data)), logging.PacketDropUnexpectedVersion) + if wire.IsLongHeaderPacket(p.data[0]) { + hdr, packetData, rest, err := wire.ParsePacket(p.data, s.srcConnIDLen) + if err != nil { + if s.tracer != nil { + dropReason := logging.PacketDropHeaderParseError + if err == wire.ErrUnsupportedVersion { + dropReason = logging.PacketDropUnsupportedVersion + } + s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.ByteCount(len(data)), dropReason) + } + s.logger.Debugf("error parsing packet: %s", err) + break } - s.logger.Debugf("Dropping packet with version %x. Expected %x.", hdr.Version, s.version) - break - } + lastConnID = hdr.DestConnectionID - if counter > 0 && !hdr.DestConnectionID.Equal(lastConnID) { - if s.tracer != nil { - s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), protocol.ByteCount(len(data)), logging.PacketDropUnknownConnectionID) + if hdr.Version != s.version { + if s.tracer != nil { + s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), protocol.ByteCount(len(data)), logging.PacketDropUnexpectedVersion) + } + s.logger.Debugf("Dropping packet with version %x. Expected %x.", hdr.Version, s.version) + break } - s.logger.Debugf("coalesced packet has different destination connection ID: %s, expected %s", hdr.DestConnectionID, lastConnID) + + if counter > 0 { + p.buffer.Split() + } + counter++ + + // only log if this actually a coalesced packet + if s.logger.Debug() && (counter > 1 || len(rest) > 0) { + s.logger.Debugf("Parsed a coalesced packet. Part %d: %d bytes. Remaining: %d bytes.", counter, len(packetData), len(rest)) + } + + p.data = packetData + + if wasProcessed := s.handleLongHeaderPacket(p, hdr); wasProcessed { + processed = true + } + data = rest + } else { + if counter > 0 { + p.buffer.Split() + } + processed = s.handleShortHeaderPacket(p, destConnID) break } - lastConnID = hdr.DestConnectionID + } - if counter > 0 { - p.buffer.Split() - } - counter++ + p.buffer.MaybeRelease() + return processed +} + +func (s *connection) handleShortHeaderPacket(p *receivedPacket, destConnID protocol.ConnectionID) bool { + var wasQueued bool - // only log if this actually a coalesced packet - if s.logger.Debug() && (counter > 1 || len(rest) > 0) { - s.logger.Debugf("Parsed a coalesced packet. Part %d: %d bytes. Remaining: %d bytes.", counter, len(packetData), len(rest)) + defer func() { + // Put back the packet buffer if the packet wasn't queued for later decryption. 
+ if !wasQueued { + p.buffer.Decrement() } - p.data = packetData - if wasProcessed := s.handleSinglePacket(p, hdr); wasProcessed { - processed = true + }() + + pn, pnLen, keyPhase, data, err := s.unpacker.UnpackShortHeader(p.rcvTime, p.data) + if err != nil { + wasQueued = s.handleUnpackError(err, p, logging.PacketType1RTT) + return false + } + + if s.logger.Debug() { + s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, 1-RTT", pn, p.Size(), destConnID) + wire.LogShortHeader(s.logger, destConnID, pn, pnLen, keyPhase) + } + + if s.receivedPacketHandler.IsPotentiallyDuplicate(pn, protocol.Encryption1RTT) { + s.logger.Debugf("Dropping (potentially) duplicate packet.") + if s.tracer != nil { + s.tracer.DroppedPacket(logging.PacketType1RTT, p.Size(), logging.PacketDropDuplicate) } - data = rest + return false } - p.buffer.MaybeRelease() - return processed + + var log func([]logging.Frame) + if s.tracer != nil { + log = func(frames []logging.Frame) { + s.tracer.ReceivedShortHeaderPacket( + &logging.ShortHeader{ + DestConnectionID: destConnID, + PacketNumber: pn, + PacketNumberLen: pnLen, + KeyPhase: keyPhase, + }, + p.Size(), + frames, + ) + } + } + if err := s.handleUnpackedShortHeaderPacket(destConnID, pn, data, p.ecn, p.rcvTime, log); err != nil { + s.closeLocal(err) + return false + } + return true } -func (s *connection) handleSinglePacket(p *receivedPacket, hdr *wire.Header) bool /* was the packet successfully processed */ { +func (s *connection) handleLongHeaderPacket(p *receivedPacket, hdr *wire.Header) bool /* was the packet successfully processed */ { var wasQueued bool defer func() { @@ -960,7 +1034,7 @@ func (s *connection) handleSinglePacket(p *receivedPacket, hdr *wire.Header) boo // The server can change the source connection ID with the first Handshake packet. // After this, all packets with a different source connection have to be ignored. - if s.receivedFirstPacket && hdr.IsLongHeader && hdr.Type == protocol.PacketTypeInitial && !hdr.SrcConnectionID.Equal(s.handshakeDestConnID) { + if s.receivedFirstPacket && hdr.Type == protocol.PacketTypeInitial && hdr.SrcConnectionID != s.handshakeDestConnID { if s.tracer != nil { s.tracer.DroppedPacket(logging.PacketTypeInitial, p.Size(), logging.PacketDropUnknownConnectionID) } @@ -975,53 +1049,18 @@ func (s *connection) handleSinglePacket(p *receivedPacket, hdr *wire.Header) boo return false } - packet, err := s.unpacker.Unpack(hdr, p.rcvTime, p.data) + packet, err := s.unpacker.UnpackLongHeader(hdr, p.rcvTime, p.data) if err != nil { - switch err { - case handshake.ErrKeysDropped: - if s.tracer != nil { - s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropKeyUnavailable) - } - s.logger.Debugf("Dropping %s packet (%d bytes) because we already dropped the keys.", hdr.PacketType(), p.Size()) - case handshake.ErrKeysNotYetAvailable: - // Sealer for this encryption level not yet available. - // Try again later. - wasQueued = true - s.tryQueueingUndecryptablePacket(p, hdr) - case wire.ErrInvalidReservedBits: - s.closeLocal(&qerr.TransportError{ - ErrorCode: qerr.ProtocolViolation, - ErrorMessage: err.Error(), - }) - case handshake.ErrDecryptionFailed: - // This might be a packet injected by an attacker. Drop it. - if s.tracer != nil { - s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropPayloadDecryptError) - } - s.logger.Debugf("Dropping %s packet (%d bytes) that could not be unpacked. 
Error: %s", hdr.PacketType(), p.Size(), err) - default: - var headerErr *headerParseError - if errors.As(err, &headerErr) { - // This might be a packet injected by an attacker. Drop it. - if s.tracer != nil { - s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropHeaderParseError) - } - s.logger.Debugf("Dropping %s packet (%d bytes) for which we couldn't unpack the header. Error: %s", hdr.PacketType(), p.Size(), err) - } else { - // This is an error returned by the AEAD (other than ErrDecryptionFailed). - // For example, a PROTOCOL_VIOLATION due to key updates. - s.closeLocal(err) - } - } + wasQueued = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr)) return false } if s.logger.Debug() { - s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, %s", packet.packetNumber, p.Size(), hdr.DestConnectionID, packet.encryptionLevel) + s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, %s", packet.hdr.PacketNumber, p.Size(), hdr.DestConnectionID, packet.encryptionLevel) packet.hdr.Log(s.logger) } - if s.receivedPacketHandler.IsPotentiallyDuplicate(packet.packetNumber, packet.encryptionLevel) { + if s.receivedPacketHandler.IsPotentiallyDuplicate(packet.hdr.PacketNumber, packet.encryptionLevel) { s.logger.Debugf("Dropping (potentially) duplicate packet.") if s.tracer != nil { s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropDuplicate) @@ -1036,6 +1075,46 @@ func (s *connection) handleSinglePacket(p *receivedPacket, hdr *wire.Header) boo return true } +func (s *connection) handleUnpackError(err error, p *receivedPacket, pt logging.PacketType) (wasQueued bool) { + switch err { + case handshake.ErrKeysDropped: + if s.tracer != nil { + s.tracer.DroppedPacket(pt, p.Size(), logging.PacketDropKeyUnavailable) + } + s.logger.Debugf("Dropping %s packet (%d bytes) because we already dropped the keys.", pt, p.Size()) + case handshake.ErrKeysNotYetAvailable: + // Sealer for this encryption level not yet available. + // Try again later. + s.tryQueueingUndecryptablePacket(p, pt) + return true + case wire.ErrInvalidReservedBits: + s.closeLocal(&qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: err.Error(), + }) + case handshake.ErrDecryptionFailed: + // This might be a packet injected by an attacker. Drop it. + if s.tracer != nil { + s.tracer.DroppedPacket(pt, p.Size(), logging.PacketDropPayloadDecryptError) + } + s.logger.Debugf("Dropping %s packet (%d bytes) that could not be unpacked. Error: %s", pt, p.Size(), err) + default: + var headerErr *headerParseError + if errors.As(err, &headerErr) { + // This might be a packet injected by an attacker. Drop it. + if s.tracer != nil { + s.tracer.DroppedPacket(pt, p.Size(), logging.PacketDropHeaderParseError) + } + s.logger.Debugf("Dropping %s packet (%d bytes) for which we couldn't unpack the header. Error: %s", pt, p.Size(), err) + } else { + // This is an error returned by the AEAD (other than ErrDecryptionFailed). + // For example, a PROTOCOL_VIOLATION due to key updates. 
+ s.closeLocal(err) + } + } + return false +} + func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte) bool /* was this a valid Retry */ { if s.perspective == protocol.PerspectiveServer { if s.tracer != nil { @@ -1052,7 +1131,7 @@ func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte) bool /* wa return false } destConnID := s.connIDManager.Get() - if hdr.SrcConnectionID.Equal(destConnID) { + if hdr.SrcConnectionID == destConnID { if s.tracer != nil { s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedPacket) } @@ -1107,7 +1186,7 @@ func (s *connection) handleVersionNegotiationPacket(p *receivedPacket) { return } - hdr, supportedVersions, err := wire.ParseVersionNegotiationPacket(bytes.NewReader(p.data)) + src, dest, supportedVersions, err := wire.ParseVersionNegotiationPacket(p.data) if err != nil { if s.tracer != nil { s.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, p.Size(), logging.PacketDropHeaderParseError) @@ -1129,7 +1208,7 @@ func (s *connection) handleVersionNegotiationPacket(p *receivedPacket) { s.logger.Infof("Received a Version Negotiation packet. Supported Versions: %s", supportedVersions) if s.tracer != nil { - s.tracer.ReceivedVersionNegotiationPacket(hdr, supportedVersions) + s.tracer.ReceivedVersionNegotiationPacket(dest, src, supportedVersions) } newVersion, ok := protocol.ChooseSupportedVersion(s.config.Versions, supportedVersions) if !ok { @@ -1158,13 +1237,6 @@ func (s *connection) handleUnpackedPacket( rcvTime time.Time, packetSize protocol.ByteCount, // only for logging ) error { - if len(packet.data) == 0 { - return &qerr.TransportError{ - ErrorCode: qerr.ProtocolViolation, - ErrorMessage: "empty packet", - } - } - if !s.receivedFirstPacket { s.receivedFirstPacket = true if !s.versionNegotiated && s.tracer != nil { @@ -1178,7 +1250,7 @@ func (s *connection) handleUnpackedPacket( s.tracer.NegotiatedVersion(s.version, clientVersions, serverVersions) } // The server can change the source connection ID with the first Handshake packet. - if s.perspective == protocol.PerspectiveClient && packet.hdr.IsLongHeader && !packet.hdr.SrcConnectionID.Equal(s.handshakeDestConnID) { + if s.perspective == protocol.PerspectiveClient && packet.hdr.IsLongHeader && packet.hdr.SrcConnectionID != s.handshakeDestConnID { cid := packet.hdr.SrcConnectionID s.logger.Debugf("Received first packet. Switching destination connection ID to: %s", cid) s.handshakeDestConnID = cid @@ -1187,10 +1259,10 @@ func (s *connection) handleUnpackedPacket( // We create the connection as soon as we receive the first packet from the client. // We do that before authenticating the packet. // That means that if the source connection ID was corrupted, - // we might have create a connection with an incorrect source connection ID. + // we might have created a connection with an incorrect source connection ID. // Once we authenticate the first packet, we need to update it. 
if s.perspective == protocol.PerspectiveServer { - if !packet.hdr.SrcConnectionID.Equal(s.handshakeDestConnID) { + if packet.hdr.SrcConnectionID != s.handshakeDestConnID { s.handshakeDestConnID = packet.hdr.SrcConnectionID s.connIDManager.ChangeInitialConnID(packet.hdr.SrcConnectionID) } @@ -1209,16 +1281,53 @@ func (s *connection) handleUnpackedPacket( s.firstAckElicitingPacketAfterIdleSentTime = time.Time{} s.keepAlivePingSent = false + var log func([]logging.Frame) + if s.tracer != nil { + log = func(frames []logging.Frame) { + s.tracer.ReceivedLongHeaderPacket(packet.hdr, packetSize, frames) + } + } + isAckEliciting, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log) + if err != nil { + return err + } + return s.receivedPacketHandler.ReceivedPacket(packet.hdr.PacketNumber, ecn, packet.encryptionLevel, rcvTime, isAckEliciting) +} + +func (s *connection) handleUnpackedShortHeaderPacket( + destConnID protocol.ConnectionID, + pn protocol.PacketNumber, + data []byte, + ecn protocol.ECN, + rcvTime time.Time, + log func([]logging.Frame), +) error { + s.lastPacketReceivedTime = rcvTime + s.firstAckElicitingPacketAfterIdleSentTime = time.Time{} + s.keepAlivePingSent = false + + isAckEliciting, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log) + if err != nil { + return err + } + return s.receivedPacketHandler.ReceivedPacket(pn, ecn, protocol.Encryption1RTT, rcvTime, isAckEliciting) +} + +func (s *connection) handleFrames( + data []byte, + destConnID protocol.ConnectionID, + encLevel protocol.EncryptionLevel, + log func([]logging.Frame), +) (isAckEliciting bool, _ error) { // Only used for tracing. // If we're not tracing, this slice will always remain empty. var frames []wire.Frame - r := bytes.NewReader(packet.data) - var isAckEliciting bool - for { - frame, err := s.frameParser.ParseNext(r, packet.encryptionLevel) + for len(data) > 0 { + l, frame, err := s.frameParser.ParseNext(data, encLevel) if err != nil { - return err + return false, err } + data = data[l:] if frame == nil { break } @@ -1227,29 +1336,28 @@ func (s *connection) handleUnpackedPacket( } // Only process frames now if we're not logging. // If we're logging, we need to make sure that the packet_received event is logged first. 
- if s.tracer == nil { - if err := s.handleFrame(frame, packet.encryptionLevel, packet.hdr.DestConnectionID); err != nil { - return err + if log == nil { + if err := s.handleFrame(frame, encLevel, destConnID); err != nil { + return false, err } } else { frames = append(frames, frame) } } - if s.tracer != nil { + if log != nil { fs := make([]logging.Frame, len(frames)) for i, frame := range frames { fs[i] = logutils.ConvertFrame(frame) } - s.tracer.ReceivedPacket(packet.hdr, packetSize, fs) + log(fs) for _, frame := range frames { - if err := s.handleFrame(frame, packet.encryptionLevel, packet.hdr.DestConnectionID); err != nil { - return err + if err := s.handleFrame(frame, encLevel, destConnID); err != nil { + return false, err } } } - - return s.receivedPacketHandler.ReceivedPacket(packet.packetNumber, ecn, packet.encryptionLevel, rcvTime, isAckEliciting) + return } func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel, destConnID protocol.ConnectionID) error { @@ -1262,6 +1370,7 @@ func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel err = s.handleStreamFrame(frame) case *wire.AckFrame: err = s.handleAckFrame(frame, encLevel) + wire.PutAckFrame(frame) case *wire.ConnectionCloseFrame: s.handleConnectionCloseFrame(frame) case *wire.ResetStreamFrame: @@ -1636,7 +1745,7 @@ func (s *connection) checkTransportParameters(params *wire.TransportParameters) } // check the initial_source_connection_id - if !params.InitialSourceConnectionID.Equal(s.handshakeDestConnID) { + if params.InitialSourceConnectionID != s.handshakeDestConnID { return fmt.Errorf("expected initial_source_connection_id to equal %s, is %s", s.handshakeDestConnID, params.InitialSourceConnectionID) } @@ -1644,14 +1753,14 @@ func (s *connection) checkTransportParameters(params *wire.TransportParameters) return nil } // check the original_destination_connection_id - if !params.OriginalDestinationConnectionID.Equal(s.origDestConnID) { + if params.OriginalDestinationConnectionID != s.origDestConnID { return fmt.Errorf("expected original_destination_connection_id to equal %s, is %s", s.origDestConnID, params.OriginalDestinationConnectionID) } if s.retrySrcConnID != nil { // a Retry was performed if params.RetrySourceConnectionID == nil { return errors.New("missing retry_source_connection_id") } - if !(*params.RetrySourceConnectionID).Equal(*s.retrySrcConnID) { + if *params.RetrySourceConnectionID != *s.retrySrcConnID { return fmt.Errorf("expected retry_source_connection_id to equal %s, is %s", s.retrySrcConnID, *params.RetrySourceConnectionID) } } else if params.RetrySourceConnectionID != nil { @@ -1748,7 +1857,24 @@ func (s *connection) sendPackets() error { } func (s *connection) maybeSendAckOnlyPacket() error { - packet, err := s.packer.MaybePackAckPacket(s.handshakeConfirmed) + if !s.handshakeConfirmed { + packet, err := s.packer.PackCoalescedPacket(true) + if err != nil { + return err + } + if packet == nil { + return nil + } + s.logCoalescedPacket(packet) + for _, p := range packet.packets { + s.sentPacketHandler.SentPacket(p.ToAckHandlerPacket(time.Now(), s.retransmissionQueue)) + } + s.connIDManager.SentPacket() + s.sendQueue.Send(packet.buffer) + return nil + } + + packet, err := s.packer.PackPacket(true) if err != nil { return err } @@ -1809,7 +1935,7 @@ func (s *connection) sendPacket() (bool, error) { now := time.Now() if !s.handshakeConfirmed { - packet, err := s.packer.PackCoalescedPacket() + packet, err := s.packer.PackCoalescedPacket(false) if err != nil || packet 
== nil { return false, err } @@ -1833,7 +1959,7 @@ func (s *connection) sendPacket() (bool, error) { s.sendPackedPacket(packet, now) return true, nil } - packet, err := s.packer.PackPacket() + packet, err := s.packer.PackPacket(false) if err != nil || packet == nil { return false, err } @@ -1880,7 +2006,11 @@ func (s *connection) logPacketContents(p *packetContents) { for _, f := range p.frames { frames = append(frames, logutils.ConvertFrame(f.Frame)) } - s.tracer.SentPacket(p.header, p.length, p.ack, frames) + var ack *logging.AckFrame + if p.ack != nil { + ack = logutils.ConvertAckFrame(p.ack) + } + s.tracer.SentPacket(p.header, p.length, ack, frames) } // quic-go logging @@ -1971,20 +2101,22 @@ func (s *connection) scheduleSending() { } } -func (s *connection) tryQueueingUndecryptablePacket(p *receivedPacket, hdr *wire.Header) { +// tryQueueingUndecryptablePacket queues a packet for which we're missing the decryption keys. +// The logging.PacketType is only used for logging purposes. +func (s *connection) tryQueueingUndecryptablePacket(p *receivedPacket, pt logging.PacketType) { if s.handshakeComplete { panic("shouldn't queue undecryptable packets after handshake completion") } if len(s.undecryptablePackets)+1 > protocol.MaxUndecryptablePackets { if s.tracer != nil { - s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropDOSPrevention) + s.tracer.DroppedPacket(pt, p.Size(), logging.PacketDropDOSPrevention) } s.logger.Infof("Dropping undecryptable packet (%d bytes). Undecryptable packet queue full.", p.Size()) return } s.logger.Infof("Queueing packet (%d bytes) for later decryption", p.Size()) if s.tracer != nil { - s.tracer.BufferedPacket(logging.PacketTypeFromHeader(hdr)) + s.tracer.BufferedPacket(pt, p.Size()) } s.undecryptablePackets = append(s.undecryptablePackets, p) } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/datagram_queue.go b/vendor/github.com/Psiphon-Labs/quic-go/datagram_queue.go index 29b270a95..a88666f93 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/datagram_queue.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/datagram_queue.go @@ -8,6 +8,7 @@ import ( type datagramQueue struct { sendQueue chan *wire.DatagramFrame + nextFrame *wire.DatagramFrame rcvQueue chan []byte closeErr error @@ -49,15 +50,26 @@ func (h *datagramQueue) AddAndWait(f *wire.DatagramFrame) error { } } -// Get dequeues a DATAGRAM frame for sending. -func (h *datagramQueue) Get() *wire.DatagramFrame { +// Peek gets the next DATAGRAM frame for sending. +// If actually sent out, Pop needs to be called before the next call to Peek. +func (h *datagramQueue) Peek() *wire.DatagramFrame { + if h.nextFrame != nil { + return h.nextFrame + } select { - case f := <-h.sendQueue: + case h.nextFrame = <-h.sendQueue: h.dequeued <- struct{}{} - return f default: return nil } + return h.nextFrame +} + +func (h *datagramQueue) Pop() { + if h.nextFrame == nil { + panic("datagramQueue BUG: Pop called for nil frame") + } + h.nextFrame = nil } // HandleDatagramFrame handles a received DATAGRAM frame. diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/body.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/body.go index b30de660c..e50a2f9f2 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/http3/body.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/body.go @@ -18,12 +18,15 @@ type HTTPStreamer interface { } type StreamCreator interface { + // Context returns a context that is cancelled when the underlying connection is closed. 
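The Peek/Pop pair added to datagramQueue above replaces the old Get call so that a DATAGRAM frame is only dequeued once it has actually been packed; a frame that does not fit in the current packet stays queued for the next one. A minimal, self-contained sketch of that caller pattern follows; the queue and frame types are simplified stand-ins, not the vendored implementations.

    package main

    import "fmt"

    // frame is a stand-in for a DATAGRAM frame.
    type frame struct{ payload []byte }

    // queue is a simplified stand-in for the vendored datagramQueue.
    type queue struct {
    	pending   []*frame
    	nextFrame *frame
    }

    // Peek returns the next frame without removing it; Pop must be called
    // once the frame has actually been packed and sent.
    func (q *queue) Peek() *frame {
    	if q.nextFrame != nil {
    		return q.nextFrame
    	}
    	if len(q.pending) == 0 {
    		return nil
    	}
    	q.nextFrame, q.pending = q.pending[0], q.pending[1:]
    	return q.nextFrame
    }

    func (q *queue) Pop() {
    	if q.nextFrame == nil {
    		panic("Pop called without a peeked frame")
    	}
    	q.nextFrame = nil
    }

    // packDatagrams appends as many frames as fit into the remaining space.
    // A frame that does not fit stays queued for a later packet.
    func packDatagrams(q *queue, remaining int) (packed int) {
    	for {
    		f := q.Peek()
    		if f == nil || len(f.payload) > remaining {
    			return packed
    		}
    		remaining -= len(f.payload)
    		q.Pop() // dequeue only after the frame was actually packed
    		packed++
    	}
    }

    func main() {
    	q := &queue{pending: []*frame{{make([]byte, 800)}, {make([]byte, 700)}}}
    	fmt.Println(packDatagrams(q, 1200)) // 1: the 700-byte frame waits for the next packet
    	fmt.Println(packDatagrams(q, 1200)) // 1
    }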
+ Context() context.Context OpenStream() (quic.Stream, error) OpenStreamSync(context.Context) (quic.Stream, error) OpenUniStream() (quic.SendStream, error) OpenUniStreamSync(context.Context) (quic.SendStream, error) LocalAddr() net.Addr RemoteAddr() net.Addr + ConnectionState() quic.ConnectionState } var _ StreamCreator = quic.Connection(nil) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/capsule.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/capsule.go new file mode 100644 index 000000000..0588b5ee6 --- /dev/null +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/capsule.go @@ -0,0 +1,55 @@ +package http3 + +import ( + "io" + + "github.com/Psiphon-Labs/quic-go/quicvarint" +) + +// CapsuleType is the type of the capsule. +type CapsuleType uint64 + +type exactReader struct { + R *io.LimitedReader +} + +func (r *exactReader) Read(b []byte) (int, error) { + n, err := r.R.Read(b) + if r.R.N > 0 { + return n, io.ErrUnexpectedEOF + } + return n, err +} + +// ParseCapsule parses the header of a Capsule. +// It returns an io.LimitedReader that can be used to read the Capsule value. +// The Capsule value must be read entirely (i.e. until the io.EOF) before using r again. +func ParseCapsule(r quicvarint.Reader) (CapsuleType, io.Reader, error) { + ct, err := quicvarint.Read(r) + if err != nil { + if err == io.EOF { + return 0, nil, io.ErrUnexpectedEOF + } + return 0, nil, err + } + l, err := quicvarint.Read(r) + if err != nil { + if err == io.EOF { + return 0, nil, io.ErrUnexpectedEOF + } + return 0, nil, err + } + return CapsuleType(ct), &exactReader{R: io.LimitReader(r, int64(l)).(*io.LimitedReader)}, nil +} + +// WriteCapsule writes a capsule +func WriteCapsule(w quicvarint.Writer, ct CapsuleType, value []byte) error { + b := make([]byte, 0, 16) + b = quicvarint.Append(b, uint64(ct)) + b = quicvarint.Append(b, uint64(len(value))) + if _, err := w.Write(b); err != nil { + return err + } + _, err := w.Write(value) + return err +} diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/client.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/client.go index 1fe64688d..d3b20de5b 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/http3/client.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/client.go @@ -1,7 +1,6 @@ package http3 import ( - "bytes" "context" "crypto/tls" "errors" @@ -152,11 +151,11 @@ func (c *client) setupConn() error { if err != nil { return err } - buf := &bytes.Buffer{} - quicvarint.Write(buf, streamTypeControlStream) + b := make([]byte, 0, 64) + b = quicvarint.Append(b, streamTypeControlStream) // send the SETTINGS frame - (&settingsFrame{Datagram: c.opts.EnableDatagram, Other: c.opts.AdditionalSettings}).Write(buf) - _, err = str.Write(buf.Bytes()) + b = (&settingsFrame{Datagram: c.opts.EnableDatagram, Other: c.opts.AdditionalSettings}).Append(b) + _, err = str.Write(b) return err } @@ -414,6 +413,7 @@ func (c *client) doRequest(req *http.Request, str quic.Stream, opt RoundTripOpt, ProtoMajor: 3, Header: http.Header{}, TLS: &connState, + Request: req, } for _, hf := range hfs { switch hf.Name { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/frames.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/frames.go index 6f9b23e43..063e78c87 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/http3/frames.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/frames.go @@ -74,18 +74,18 @@ type dataFrame struct { Length uint64 } -func (f *dataFrame) Write(b *bytes.Buffer) { - quicvarint.Write(b, 0x0) - quicvarint.Write(b, f.Length) +func (f *dataFrame) 
Append(b []byte) []byte { + b = quicvarint.Append(b, 0x0) + return quicvarint.Append(b, f.Length) } type headersFrame struct { Length uint64 } -func (f *headersFrame) Write(b *bytes.Buffer) { - quicvarint.Write(b, 0x1) - quicvarint.Write(b, f.Length) +func (f *headersFrame) Append(b []byte) []byte { + b = quicvarint.Append(b, 0x1) + return quicvarint.Append(b, f.Length) } const settingDatagram = 0xffd277 @@ -142,8 +142,8 @@ func parseSettingsFrame(r io.Reader, l uint64) (*settingsFrame, error) { return frame, nil } -func (f *settingsFrame) Write(b *bytes.Buffer) { - quicvarint.Write(b, 0x4) +func (f *settingsFrame) Append(b []byte) []byte { + b = quicvarint.Append(b, 0x4) var l protocol.ByteCount for id, val := range f.Other { l += quicvarint.Len(id) + quicvarint.Len(val) @@ -151,13 +151,14 @@ func (f *settingsFrame) Write(b *bytes.Buffer) { if f.Datagram { l += quicvarint.Len(settingDatagram) + quicvarint.Len(1) } - quicvarint.Write(b, uint64(l)) + b = quicvarint.Append(b, uint64(l)) if f.Datagram { - quicvarint.Write(b, settingDatagram) - quicvarint.Write(b, 1) + b = quicvarint.Append(b, settingDatagram) + b = quicvarint.Append(b, 1) } for id, val := range f.Other { - quicvarint.Write(b, id) - quicvarint.Write(b, val) + b = quicvarint.Append(b, id) + b = quicvarint.Append(b, val) } + return b } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/http_stream.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/http_stream.go index a5112dce2..062dd370f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/http3/http_stream.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/http_stream.go @@ -1,7 +1,6 @@ package http3 import ( - "bytes" "fmt" "github.com/Psiphon-Labs/quic-go" @@ -16,6 +15,8 @@ type Stream quic.Stream type stream struct { quic.Stream + buf []byte + onFrameError func() bytesRemainingInFrame uint64 } @@ -23,7 +24,11 @@ type stream struct { var _ Stream = &stream{} func newStream(str quic.Stream, onFrameError func()) *stream { - return &stream{Stream: str, onFrameError: onFrameError} + return &stream{ + Stream: str, + onFrameError: onFrameError, + buf: make([]byte, 0, 16), + } } func (s *stream) Read(b []byte) (int, error) { @@ -62,9 +67,9 @@ func (s *stream) Read(b []byte) (int, error) { } func (s *stream) Write(b []byte) (int, error) { - buf := &bytes.Buffer{} - (&dataFrame{Length: uint64(len(b))}).Write(buf) - if _, err := s.Stream.Write(buf.Bytes()); err != nil { + s.buf = s.buf[:0] + s.buf = (&dataFrame{Length: uint64(len(b))}).Append(s.buf) + if _, err := s.Stream.Write(s.buf); err != nil { return 0, err } return s.Stream.Write(b) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/request_writer.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/request_writer.go index 86862dfa3..b219c577f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/http3/request_writer.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/request_writer.go @@ -58,10 +58,9 @@ func (w *requestWriter) writeHeaders(wr io.Writer, req *http.Request, gzip bool) return err } - buf := &bytes.Buffer{} - hf := headersFrame{Length: uint64(w.headerBuf.Len())} - hf.Write(buf) - if _, err := wr.Write(buf.Bytes()); err != nil { + b := make([]byte, 0, 128) + b = (&headersFrame{Length: uint64(w.headerBuf.Len())}).Append(b) + if _, err := wr.Write(b); err != nil { return err } _, err := wr.Write(w.headerBuf.Bytes()) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/response_writer.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/response_writer.go index 9f957cf33..2501fe022 100644 --- 
a/vendor/github.com/Psiphon-Labs/quic-go/http3/response_writer.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/response_writer.go @@ -15,6 +15,7 @@ import ( type responseWriter struct { conn quic.Connection bufferedStr *bufio.Writer + buf []byte header http.Header status int // status code passed to WriteHeader @@ -32,6 +33,7 @@ var ( func newResponseWriter(str quic.Stream, conn quic.Connection, logger utils.Logger) *responseWriter { return &responseWriter{ header: http.Header{}, + buf: make([]byte, 16), conn: conn, bufferedStr: bufio.NewWriter(str), logger: logger, @@ -62,10 +64,10 @@ func (w *responseWriter) WriteHeader(status int) { } } - buf := &bytes.Buffer{} - (&headersFrame{Length: uint64(headers.Len())}).Write(buf) + w.buf = w.buf[:0] + w.buf = (&headersFrame{Length: uint64(headers.Len())}).Append(w.buf) w.logger.Infof("Responding with %d", status) - if _, err := w.bufferedStr.Write(buf.Bytes()); err != nil { + if _, err := w.bufferedStr.Write(w.buf); err != nil { w.logger.Errorf("could not write headers frame: %s", err.Error()) } if _, err := w.bufferedStr.Write(headers.Bytes()); err != nil { @@ -84,9 +86,9 @@ func (w *responseWriter) Write(p []byte) (int, error) { return 0, http.ErrBodyNotAllowed } df := &dataFrame{Length: uint64(len(p))} - buf := &bytes.Buffer{} - df.Write(buf) - if _, err := w.bufferedStr.Write(buf.Bytes()); err != nil { + w.buf = w.buf[:0] + w.buf = df.Append(w.buf) + if _, err := w.bufferedStr.Write(w.buf); err != nil { return 0, err } return w.bufferedStr.Write(p) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/server.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/server.go index 81866ac6b..553b7eba1 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/http3/server.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/server.go @@ -1,7 +1,6 @@ package http3 import ( - "bytes" "context" "crypto/tls" "errors" @@ -29,8 +28,10 @@ var ( ) const ( - nextProtoH3Draft29 = "h3-29" - nextProtoH3 = "h3" + // NextProtoH3Draft29 is the ALPN protocol negotiated during the TLS handshake, for QUIC draft 29. + NextProtoH3Draft29 = "h3-29" + // NextProtoH3 is the ALPN protocol negotiated during the TLS handshake, for QUIC v1 and v2. + NextProtoH3 = "h3" ) // StreamType is the stream type of a unidirectional stream. @@ -45,10 +46,10 @@ const ( func versionToALPN(v protocol.VersionNumber) string { if v == protocol.Version1 || v == protocol.Version2 { - return nextProtoH3 + return NextProtoH3 } if v == protocol.VersionTLS || v == protocol.VersionDraft29 { - return nextProtoH3Draft29 + return NextProtoH3Draft29 } return "" } @@ -63,7 +64,7 @@ func ConfigureTLSConfig(tlsConf *tls.Config) *tls.Config { return &tls.Config{ GetConfigForClient: func(ch *tls.ClientHelloInfo) (*tls.Config, error) { // determine the ALPN from the QUIC version used - proto := nextProtoH3 + proto := NextProtoH3 if qconn, ok := ch.Conn.(handshake.ConnWithVersion); ok { proto = versionToALPN(qconn.GetQUICVersion()) } @@ -225,11 +226,22 @@ func (s *Server) ListenAndServeTLS(certFile, keyFile string) error { // Serve an existing UDP connection. // It is possible to reuse the same connection for outgoing connections. -// Closing the server does not close the packet conn. +// Closing the server does not close the connection. func (s *Server) Serve(conn net.PacketConn) error { return s.serveConn(s.TLSConfig, conn) } +// ServeQUICConn serves a single QUIC connection. 
+func (s *Server) ServeQUICConn(conn quic.Connection) error { + s.mutex.Lock() + if s.logger == nil { + s.logger = utils.DefaultLogger.WithPrefix("server") + } + s.mutex.Unlock() + + return s.handleConn(conn) +} + // ServeListener serves an existing QUIC listener. // Make sure you use http3.ConfigureTLSConfig to configure a tls.Config // and use it to construct a http3-friendly QUIC listener. @@ -296,7 +308,11 @@ func (s *Server) serveListener(ln quic.EarlyListener) error { if err != nil { return err } - go s.handleConn(conn) + go func() { + if err := s.handleConn(conn); err != nil { + s.logger.Debugf(err.Error()) + } + }() } } @@ -404,19 +420,18 @@ func (s *Server) removeListener(l *quic.EarlyListener) { s.mutex.Unlock() } -func (s *Server) handleConn(conn quic.EarlyConnection) { +func (s *Server) handleConn(conn quic.Connection) error { decoder := qpack.NewDecoder(nil) // send a SETTINGS frame str, err := conn.OpenUniStream() if err != nil { - s.logger.Debugf("Opening the control stream failed.") - return + return fmt.Errorf("opening the control stream failed: %w", err) } - buf := &bytes.Buffer{} - quicvarint.Write(buf, streamTypeControlStream) // stream type - (&settingsFrame{Datagram: s.EnableDatagrams, Other: s.AdditionalSettings}).Write(buf) - str.Write(buf.Bytes()) + b := make([]byte, 0, 64) + b = quicvarint.Append(b, streamTypeControlStream) // stream type + b = (&settingsFrame{Datagram: s.EnableDatagrams, Other: s.AdditionalSettings}).Append(b) + str.Write(b) go s.handleUnidirectionalStreams(conn) @@ -425,8 +440,11 @@ func (s *Server) handleConn(conn quic.EarlyConnection) { for { str, err := conn.AcceptStream(context.Background()) if err != nil { - s.logger.Debugf("Accepting stream failed: %s", err) - return + var appErr *quic.ApplicationError + if errors.As(err, &appErr) && appErr.ErrorCode == quic.ApplicationErrorCode(errorNoError) { + return nil + } + return fmt.Errorf("accepting stream failed: %w", err) } go func() { rerr := s.handleRequest(conn, str, decoder, func() { @@ -454,7 +472,7 @@ func (s *Server) handleConn(conn quic.EarlyConnection) { } } -func (s *Server) handleUnidirectionalStreams(conn quic.EarlyConnection) { +func (s *Server) handleUnidirectionalStreams(conn quic.Connection) { for { str, err := conn.AcceptUniStream(context.Background()) if err != nil { @@ -577,12 +595,15 @@ func (s *Server) handleRequest(conn quic.Connection, str quic.Stream, decoder *q func() { defer func() { if p := recover(); p != nil { + panicked = true + if p == http.ErrAbortHandler { + return + } // Copied from net/http/server.go const size = 64 << 10 buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] s.logger.Errorf("http: panic serving: %v\n%s", p, buf) - panicked = true } }() handler.ServeHTTP(r, req) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/interface.go b/vendor/github.com/Psiphon-Labs/quic-go/interface.go index 1b343a23f..e97f352a2 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/interface.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/interface.go @@ -174,7 +174,7 @@ type Connection interface { // CloseWithError closes the connection with an error. // The error string will be sent to the peer. CloseWithError(ApplicationErrorCode, string) error - // The context is cancelled when the connection is closed. + // Context returns a context that is cancelled when the connection is closed. Context() context.Context // ConnectionState returns basic details about the QUIC connection. // It blocks until the handshake completes. 
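The interface.go hunk above clarifies that Connection.Context is cancelled when the connection closes, and the http3/server.go hunk earlier in this patch adds Server.ServeQUICConn for serving a single QUIC connection. A short usage sketch, assuming a quic.Connection obtained elsewhere (for example from a quic.Listener) and using the vendored fork's import paths:

    package h3serve

    import (
    	"log"

    	quic "github.com/Psiphon-Labs/quic-go"
    	"github.com/Psiphon-Labs/quic-go/http3"
    )

    // serveOne serves HTTP/3 on a single, already-accepted QUIC connection.
    func serveOne(srv *http3.Server, conn quic.Connection) error {
    	// Context is cancelled when the connection is closed, so it can be
    	// used to observe the connection's lifetime from another goroutine.
    	go func() {
    		<-conn.Context().Done()
    		log.Printf("connection from %s closed", conn.RemoteAddr())
    	}()

    	// ServeQUICConn blocks until the connection stops accepting streams.
    	return srv.ServeQUICConn(conn)
    }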
@@ -202,6 +202,20 @@ type EarlyConnection interface { NextConnection() Connection } +// StatelessResetKey is a key used to derive stateless reset tokens. +type StatelessResetKey [32]byte + +// A ConnectionID is a QUIC Connection ID, as defined in RFC 9000. +// It is not able to handle QUIC Connection IDs longer than 20 bytes, +// as they are allowed by RFC 8999. +type ConnectionID = protocol.ConnectionID + +// ConnectionIDFromBytes interprets b as a Connection ID. It panics if b is +// longer than 20 bytes. +func ConnectionIDFromBytes(b []byte) ConnectionID { + return protocol.ParseConnectionID(b) +} + // A ConnectionIDGenerator is an interface that allows clients to implement their own format // for the Connection IDs that servers/clients use as SrcConnectionID in QUIC packets. // @@ -209,7 +223,7 @@ type EarlyConnection interface { type ConnectionIDGenerator interface { // GenerateConnectionID generates a new ConnectionID. // Generated ConnectionIDs should be unique and observers should not be able to correlate two ConnectionIDs. - GenerateConnectionID() ([]byte, error) + GenerateConnectionID() (ConnectionID, error) // ConnectionIDLen tells what is the length of the ConnectionIDs generated by the implementation of // this interface. @@ -286,7 +300,7 @@ type Config struct { // limit the memory usage. // To avoid deadlocks, it is not valid to call other functions on the connection or on streams // in this callback. - AllowConnectionWindowIncrease func(sess Connection, delta uint64) bool + AllowConnectionWindowIncrease func(conn Connection, delta uint64) bool // MaxIncomingStreams is the maximum number of concurrent bidirectional streams that a peer is allowed to open. // Values above 2^60 are invalid. // If not set, it will default to 100. @@ -299,7 +313,7 @@ type Config struct { MaxIncomingUniStreams int64 // The StatelessResetKey is used to generate stateless reset tokens. // If no key is configured, sending of stateless resets is disabled. - StatelessResetKey []byte + StatelessResetKey *StatelessResetKey // KeepAlivePeriod defines whether this peer will periodically send a packet to keep the connection alive. // If set to 0, then no keep alive is sent. Otherwise, the keep alive is sent on that period (or at most // every half of MaxIdleTimeout, whichever is smaller). @@ -312,8 +326,7 @@ type Config struct { // This can be useful if version information is exchanged out-of-band. // It has no effect for a client. DisableVersionNegotiationPackets bool - // See https://datatracker.ietf.org/doc/draft-ietf-quic-datagram/. - // Datagrams will only be available when both peers enable datagram support. + // Enable QUIC datagram support (RFC 9221). 
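Config.EnableDatagrams only requests RFC 9221 datagram support; whether the peer agreed is reported through ConnectionState, which this patch extends with a Version field alongside SupportsDatagrams. A small sketch of checking the negotiated state on an established connection; the connection is assumed to have been dialed by the caller with EnableDatagrams set:

    package quiccheck

    import (
    	"log"

    	quic "github.com/Psiphon-Labs/quic-go"
    )

    // datagramsNegotiated reports whether both endpoints enabled RFC 9221
    // datagrams on conn. ConnectionState blocks until the handshake completes.
    func datagramsNegotiated(conn quic.Connection) bool {
    	state := conn.ConnectionState()
    	log.Printf("negotiated QUIC version %v, datagrams supported: %t",
    		state.Version, state.SupportsDatagrams)
    	return state.SupportsDatagrams
    }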
EnableDatagrams bool Tracer logging.Tracer @@ -356,6 +369,7 @@ type Config struct { type ConnectionState struct { TLS handshake.ConnectionState SupportsDatagrams bool + Version VersionNumber } // A Listener for incoming QUIC connections diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/interfaces.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/interfaces.go index ec19da6c9..6718bccc3 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/interfaces.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/interfaces.go @@ -7,26 +7,6 @@ import ( "github.com/Psiphon-Labs/quic-go/internal/wire" ) -// A Packet is a packet -type Packet struct { - PacketNumber protocol.PacketNumber - Frames []Frame - LargestAcked protocol.PacketNumber // InvalidPacketNumber if the packet doesn't contain an ACK - Length protocol.ByteCount - EncryptionLevel protocol.EncryptionLevel - SendTime time.Time - - IsPathMTUProbePacket bool // We don't report the loss of Path MTU probe packets to the congestion controller. - - includedInBytesInFlight bool - declaredLost bool - skippedPacket bool -} - -func (p *Packet) outstanding() bool { - return !p.declaredLost && !p.skippedPacket && !p.IsPathMTUProbePacket -} - // SentPacketHandler handles ACKs received for outgoing packets type SentPacketHandler interface { // SentPacket may modify the packet diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/packet.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/packet.go new file mode 100644 index 000000000..5311ad86a --- /dev/null +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/packet.go @@ -0,0 +1,49 @@ +package ackhandler + +import ( + "sync" + "time" + + "github.com/Psiphon-Labs/quic-go/internal/protocol" +) + +// A Packet is a packet +type Packet struct { + PacketNumber protocol.PacketNumber + Frames []Frame + LargestAcked protocol.PacketNumber // InvalidPacketNumber if the packet doesn't contain an ACK + Length protocol.ByteCount + EncryptionLevel protocol.EncryptionLevel + SendTime time.Time + + IsPathMTUProbePacket bool // We don't report the loss of Path MTU probe packets to the congestion controller. + + includedInBytesInFlight bool + declaredLost bool + skippedPacket bool +} + +func (p *Packet) outstanding() bool { + return !p.declaredLost && !p.skippedPacket && !p.IsPathMTUProbePacket +} + +var packetPool = sync.Pool{New: func() any { return &Packet{} }} + +func GetPacket() *Packet { + p := packetPool.Get().(*Packet) + p.PacketNumber = 0 + p.Frames = nil + p.LargestAcked = 0 + p.Length = 0 + p.EncryptionLevel = protocol.EncryptionLevel(0) + p.SendTime = time.Time{} + p.IsPathMTUProbePacket = false + p.includedInBytesInFlight = false + p.declaredLost = false + p.skippedPacket = false + return p +} + +// We currently only return Packets back into the pool when they're acknowledged (not when they're lost). +// This simplifies the code, and gives the vast majority of the performance benefit we can gain from using the pool. 
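The ackhandler now recycles Packet structs through a sync.Pool: GetPacket clears every field before handing a packet out, and packets are returned only once they are acknowledged, when no other reference can remain. A standalone sketch of that get-reset/put discipline with a simplified type (not the vendored Packet):

    package main

    import (
    	"fmt"
    	"sync"
    )

    type packet struct {
    	number   int64
    	frames   []string
    	inFlight bool
    }

    var packetPool = sync.Pool{New: func() any { return &packet{} }}

    // getPacket returns a recycled (or new) packet with every field reset,
    // so no state from a previous use can leak into the new one.
    func getPacket() *packet {
    	p := packetPool.Get().(*packet)
    	p.number = 0
    	p.frames = nil
    	p.inFlight = false
    	return p
    }

    // putPacket hands the packet back to the pool. The caller must not keep
    // any reference to it afterwards.
    func putPacket(p *packet) { packetPool.Put(p) }

    func main() {
    	p := getPacket()
    	p.number = 7
    	p.frames = append(p.frames, "STREAM")
    	putPacket(p)
    	fmt.Println(getPacket().frames == nil) // true: getPacket always resets fields
    }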
+func putPacket(p *Packet) { packetPool.Put(p) } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_handler.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_handler.go index bded8d437..142af2aa3 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_handler.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_handler.go @@ -46,24 +46,26 @@ func (h *receivedPacketHandler) ReceivedPacket( h.sentPackets.ReceivedPacket(encLevel) switch encLevel { case protocol.EncryptionInitial: - h.initialPackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck) + return h.initialPackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck) case protocol.EncryptionHandshake: - h.handshakePackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck) + return h.handshakePackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck) case protocol.Encryption0RTT: if h.lowest1RTTPacket != protocol.InvalidPacketNumber && pn > h.lowest1RTTPacket { return fmt.Errorf("received packet number %d on a 0-RTT packet after receiving %d on a 1-RTT packet", pn, h.lowest1RTTPacket) } - h.appDataPackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck) + return h.appDataPackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck) case protocol.Encryption1RTT: if h.lowest1RTTPacket == protocol.InvalidPacketNumber || pn < h.lowest1RTTPacket { h.lowest1RTTPacket = pn } + if err := h.appDataPackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck); err != nil { + return err + } h.appDataPackets.IgnoreBelow(h.sentPackets.GetLowestPacketNotConfirmedAcked()) - h.appDataPackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck) + return nil default: panic(fmt.Sprintf("received packet with unknown encryption level: %s", encLevel)) } - return nil } func (h *receivedPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_history.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_history.go index fece07163..1e9726117 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_history.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_history.go @@ -107,17 +107,12 @@ func (h *receivedPacketHistory) DeleteBelow(p protocol.PacketNumber) { } } -// GetAckRanges gets a slice of all AckRanges that can be used in an AckFrame -func (h *receivedPacketHistory) GetAckRanges() []wire.AckRange { - if h.ranges.Len() == 0 { - return nil - } - - ackRanges := make([]wire.AckRange, h.ranges.Len()) - i := 0 - for el := h.ranges.Back(); el != nil; el = el.Prev() { - ackRanges[i] = wire.AckRange{Smallest: el.Value.Start, Largest: el.Value.End} - i++ +// AppendAckRanges appends to a slice of all AckRanges that can be used in an AckFrame +func (h *receivedPacketHistory) AppendAckRanges(ackRanges []wire.AckRange) []wire.AckRange { + if h.ranges.Len() > 0 { + for el := h.ranges.Back(); el != nil; el = el.Prev() { + ackRanges = append(ackRanges, wire.AckRange{Smallest: el.Value.Start, Largest: el.Value.End}) + } } return ackRanges } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_tracker.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_tracker.go index b2ae3498c..770bc5baf 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_tracker.go +++ 
b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_tracker.go @@ -1,6 +1,7 @@ package ackhandler import ( + "fmt" "time" "github.com/Psiphon-Labs/quic-go/internal/protocol" @@ -48,9 +49,9 @@ func newReceivedPacketTracker( } } -func (h *receivedPacketTracker) ReceivedPacket(packetNumber protocol.PacketNumber, ecn protocol.ECN, rcvTime time.Time, shouldInstigateAck bool) { - if packetNumber < h.ignoreBelow { - return +func (h *receivedPacketTracker) ReceivedPacket(packetNumber protocol.PacketNumber, ecn protocol.ECN, rcvTime time.Time, shouldInstigateAck bool) error { + if isNew := h.packetHistory.ReceivedPacket(packetNumber); !isNew { + return fmt.Errorf("recevedPacketTracker BUG: ReceivedPacket called for old / duplicate packet %d", packetNumber) } isMissing := h.isMissing(packetNumber) @@ -59,7 +60,7 @@ func (h *receivedPacketTracker) ReceivedPacket(packetNumber protocol.PacketNumbe h.largestObservedReceivedTime = rcvTime } - if isNew := h.packetHistory.ReceivedPacket(packetNumber); isNew && shouldInstigateAck { + if shouldInstigateAck { h.hasNewAck = true } if shouldInstigateAck { @@ -74,6 +75,7 @@ func (h *receivedPacketTracker) ReceivedPacket(packetNumber protocol.PacketNumbe case protocol.ECNCE: h.ecnce++ } + return nil } // IgnoreBelow sets a lower limit for acknowledging packets. @@ -171,16 +173,16 @@ func (h *receivedPacketTracker) GetAckFrame(onlyIfQueued bool) *wire.AckFrame { } } - ack := &wire.AckFrame{ - AckRanges: h.packetHistory.GetAckRanges(), - // Make sure that the DelayTime is always positive. - // This is not guaranteed on systems that don't have a monotonic clock. - DelayTime: utils.Max(0, now.Sub(h.largestObservedReceivedTime)), - ECT0: h.ect0, - ECT1: h.ect1, - ECNCE: h.ecnce, - } + ack := wire.GetAckFrame() + ack.DelayTime = utils.Max(0, now.Sub(h.largestObservedReceivedTime)) + ack.ECT0 = h.ect0 + ack.ECT1 = h.ect1 + ack.ECNCE = h.ecnce + ack.AckRanges = h.packetHistory.AppendAckRanges(ack.AckRanges) + if h.lastAck != nil { + wire.PutAckFrame(h.lastAck) + } h.lastAck = ack h.ackAlarm = time.Time{} h.ackQueued = false diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_handler.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_handler.go index 7b41743e4..5b28aa701 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_handler.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_handler.go @@ -23,6 +23,8 @@ const ( amplificationFactor = 3 // We use Retry packets to derive an RTT estimate. Make sure we don't set the RTT to a super low value yet. minRTTAfterRetry = 5 * time.Millisecond + // The PTO duration uses exponential backoff, but is truncated to a maximum value, as allowed by RFC 8961, section 4.4. + maxPTODuration = 60 * time.Second ) type packetNumberSpace struct { @@ -226,14 +228,20 @@ func (h *sentPacketHandler) packetsInFlight() int { return packetsInFlight } -func (h *sentPacketHandler) SentPacket(packet *Packet) { - h.bytesSent += packet.Length +func (h *sentPacketHandler) SentPacket(p *Packet) { + h.bytesSent += p.Length // For the client, drop the Initial packet number space when the first Handshake packet is sent. 
- if h.perspective == protocol.PerspectiveClient && packet.EncryptionLevel == protocol.EncryptionHandshake && h.initialPackets != nil { + if h.perspective == protocol.PerspectiveClient && p.EncryptionLevel == protocol.EncryptionHandshake && h.initialPackets != nil { h.dropPackets(protocol.EncryptionInitial) } - isAckEliciting := h.sentPacketImpl(packet) - h.getPacketNumberSpace(packet.EncryptionLevel).history.SentPacket(packet, isAckEliciting) + isAckEliciting := h.sentPacketImpl(p) + if isAckEliciting { + h.getPacketNumberSpace(p.EncryptionLevel).history.SentAckElicitingPacket(p) + } else { + h.getPacketNumberSpace(p.EncryptionLevel).history.SentNonAckElicitingPacket(p.PacketNumber, p.EncryptionLevel, p.SendTime) + putPacket(p) + p = nil //nolint:ineffassign // This is just to be on the safe side. + } if h.tracer != nil && isAckEliciting { h.tracer.UpdatedMetrics(h.rttStats, h.congestion.GetCongestionWindow(), h.bytesInFlight, h.packetsInFlight()) } @@ -334,7 +342,11 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En acked1RTTPacket = true } h.removeFromBytesInFlight(p) + putPacket(p) } + // After this point, we must not use ackedPackets any longer! + // We've already returned the buffers. + ackedPackets = nil //nolint:ineffassign // This is just to be on the safe side. // Reset the pto_count unless the client is unsure if the server has validated the client's address. if h.peerCompletedAddressValidation { @@ -447,6 +459,14 @@ func (h *sentPacketHandler) getLossTimeAndSpace() (time.Time, protocol.Encryptio return lossTime, encLevel } +func (h *sentPacketHandler) getScaledPTO(includeMaxAckDelay bool) time.Duration { + pto := h.rttStats.PTO(includeMaxAckDelay) << h.ptoCount + if pto > maxPTODuration || pto <= 0 { + return maxPTODuration + } + return pto +} + // same logic as getLossTimeAndSpace, but for lastAckElicitingPacketTime instead of lossTime func (h *sentPacketHandler) getPTOTimeAndSpace() (pto time.Time, encLevel protocol.EncryptionLevel, ok bool) { // We only send application data probe packets once the handshake is confirmed, @@ -455,7 +475,7 @@ func (h *sentPacketHandler) getPTOTimeAndSpace() (pto time.Time, encLevel protoc if h.peerCompletedAddressValidation { return } - t := time.Now().Add(h.rttStats.PTO(false) << h.ptoCount) + t := time.Now().Add(h.getScaledPTO(false)) if h.initialPackets != nil { return t, protocol.EncryptionInitial, true } @@ -465,18 +485,18 @@ func (h *sentPacketHandler) getPTOTimeAndSpace() (pto time.Time, encLevel protoc if h.initialPackets != nil { encLevel = protocol.EncryptionInitial if t := h.initialPackets.lastAckElicitingPacketTime; !t.IsZero() { - pto = t.Add(h.rttStats.PTO(false) << h.ptoCount) + pto = t.Add(h.getScaledPTO(false)) } } if h.handshakePackets != nil && !h.handshakePackets.lastAckElicitingPacketTime.IsZero() { - t := h.handshakePackets.lastAckElicitingPacketTime.Add(h.rttStats.PTO(false) << h.ptoCount) + t := h.handshakePackets.lastAckElicitingPacketTime.Add(h.getScaledPTO(false)) if pto.IsZero() || (!t.IsZero() && t.Before(pto)) { pto = t encLevel = protocol.EncryptionHandshake } } if h.handshakeConfirmed && !h.appDataPackets.lastAckElicitingPacketTime.IsZero() { - t := h.appDataPackets.lastAckElicitingPacketTime.Add(h.rttStats.PTO(true) << h.ptoCount) + t := h.appDataPackets.lastAckElicitingPacketTime.Add(h.getScaledPTO(true)) if pto.IsZero() || (!t.IsZero() && t.Before(pto)) { pto = t encLevel = protocol.Encryption1RTT diff --git 
a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_history.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_history.go index a90451947..73f50af9e 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_history.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_history.go @@ -11,47 +11,53 @@ import ( type sentPacketHistory struct { rttStats *utils.RTTStats - outstandingPacketList *list.List[Packet] - etcPacketList *list.List[Packet] - packetMap map[protocol.PacketNumber]*list.Element[Packet] + outstandingPacketList *list.List[*Packet] + etcPacketList *list.List[*Packet] + packetMap map[protocol.PacketNumber]*list.Element[*Packet] highestSent protocol.PacketNumber } func newSentPacketHistory(rttStats *utils.RTTStats) *sentPacketHistory { return &sentPacketHistory{ rttStats: rttStats, - outstandingPacketList: list.New[Packet](), - etcPacketList: list.New[Packet](), - packetMap: make(map[protocol.PacketNumber]*list.Element[Packet]), + outstandingPacketList: list.New[*Packet](), + etcPacketList: list.New[*Packet](), + packetMap: make(map[protocol.PacketNumber]*list.Element[*Packet]), highestSent: protocol.InvalidPacketNumber, } } -func (h *sentPacketHistory) SentPacket(p *Packet, isAckEliciting bool) { - if p.PacketNumber <= h.highestSent { +func (h *sentPacketHistory) SentNonAckElicitingPacket(pn protocol.PacketNumber, encLevel protocol.EncryptionLevel, t time.Time) { + h.registerSentPacket(pn, encLevel, t) +} + +func (h *sentPacketHistory) SentAckElicitingPacket(p *Packet) { + h.registerSentPacket(p.PacketNumber, p.EncryptionLevel, p.SendTime) + + var el *list.Element[*Packet] + if p.outstanding() { + el = h.outstandingPacketList.PushBack(p) + } else { + el = h.etcPacketList.PushBack(p) + } + h.packetMap[p.PacketNumber] = el +} + +func (h *sentPacketHistory) registerSentPacket(pn protocol.PacketNumber, encLevel protocol.EncryptionLevel, t time.Time) { + if pn <= h.highestSent { panic("non-sequential packet number use") } // Skipped packet numbers. - for pn := h.highestSent + 1; pn < p.PacketNumber; pn++ { - el := h.etcPacketList.PushBack(Packet{ - PacketNumber: pn, - EncryptionLevel: p.EncryptionLevel, - SendTime: p.SendTime, + for p := h.highestSent + 1; p < pn; p++ { + el := h.etcPacketList.PushBack(&Packet{ + PacketNumber: p, + EncryptionLevel: encLevel, + SendTime: t, skippedPacket: true, }) - h.packetMap[pn] = el - } - h.highestSent = p.PacketNumber - - if isAckEliciting { - var el *list.Element[Packet] - if p.outstanding() { - el = h.outstandingPacketList.PushBack(*p) - } else { - el = h.etcPacketList.PushBack(*p) - } - h.packetMap[p.PacketNumber] = el + h.packetMap[p] = el } + h.highestSent = pn } // Iterate iterates through all packets. 
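The getScaledPTO helper added to sent_packet_handler.go above replaces the bare PTO-left-shift expressions so that the exponential probe-timeout backoff is truncated at maxPTODuration (60 seconds), as permitted by RFC 8961, section 4.4, and so that an overflowed, non-positive duration also falls back to the cap. A self-contained restatement of that arithmetic:

    package main

    import (
    	"fmt"
    	"time"
    )

    const maxPTODuration = 60 * time.Second

    // scaledPTO doubles the base probe timeout once per consecutive PTO event,
    // truncating at maxPTODuration; the non-positive check matters because a
    // large shift can wrap a time.Duration to a negative value.
    func scaledPTO(base time.Duration, ptoCount int) time.Duration {
    	pto := base << ptoCount
    	if pto > maxPTODuration || pto <= 0 {
    		return maxPTODuration
    	}
    	return pto
    }

    func main() {
    	for _, n := range []int{0, 3, 8, 40} {
    		fmt.Println(n, scaledPTO(200*time.Millisecond, n))
    	}
    	// prints 200ms, 1.6s, 51.2s, and then the 60s cap (1m0s)
    }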
@@ -59,7 +65,7 @@ func (h *sentPacketHistory) Iterate(cb func(*Packet) (cont bool, err error)) err cont := true outstandingEl := h.outstandingPacketList.Front() etcEl := h.etcPacketList.Front() - var el *list.Element[Packet] + var el *list.Element[*Packet] // whichever has the next packet number is returned first for cont { if outstandingEl == nil || (etcEl != nil && etcEl.Value.PacketNumber < outstandingEl.Value.PacketNumber) { @@ -76,7 +82,7 @@ func (h *sentPacketHistory) Iterate(cb func(*Packet) (cont bool, err error)) err etcEl = etcEl.Next() } var err error - cont, err = cb(&el.Value) + cont, err = cb(el.Value) if err != nil { return err } @@ -90,7 +96,7 @@ func (h *sentPacketHistory) FirstOutstanding() *Packet { if el == nil { return nil } - return &el.Value + return el.Value } func (h *sentPacketHistory) Len() int { @@ -114,7 +120,7 @@ func (h *sentPacketHistory) HasOutstandingPackets() bool { func (h *sentPacketHistory) DeleteOldPackets(now time.Time) { maxAge := 3 * h.rttStats.PTO(false) - var nextEl *list.Element[Packet] + var nextEl *list.Element[*Packet] // we don't iterate outstandingPacketList, as we should not delete outstanding packets. // being outstanding for more than 3*PTO should only happen in the case of drastic RTT changes. for el := h.etcPacketList.Front(); el != nil; el = nextEl { @@ -145,10 +151,10 @@ func (h *sentPacketHistory) DeclareLost(p *Packet) *Packet { } } if el == nil { - el = h.etcPacketList.PushFront(*p) + el = h.etcPacketList.PushFront(p) } else { - el = h.etcPacketList.InsertAfter(*p, el) + el = h.etcPacketList.InsertAfter(p, el) } h.packetMap[p.PacketNumber] = el - return &el.Value + return el.Value } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/flowcontrol/base_flow_controller.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/flowcontrol/base_flow_controller.go index 7d2eea0c3..0e53ea0dd 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/flowcontrol/base_flow_controller.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/flowcontrol/base_flow_controller.go @@ -47,7 +47,7 @@ func (c *baseFlowController) AddBytesSent(n protocol.ByteCount) { c.bytesSent += n } -// UpdateSendWindow is be called after receiving a MAX_{STREAM_}DATA frame. +// UpdateSendWindow is called after receiving a MAX_{STREAM_}DATA frame. 
func (c *baseFlowController) UpdateSendWindow(offset protocol.ByteCount) { if offset > c.sendWindow { c.sendWindow = offset diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/crypto_setup.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/crypto_setup.go index 585e3d760..25effda89 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/crypto_setup.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/crypto_setup.go @@ -365,7 +365,7 @@ func (h *cryptoSetup) onError(alert uint8, message string) { if alert == 0 { err = &qerr.TransportError{ErrorCode: qerr.InternalError, ErrorMessage: message} } else { - err = qerr.NewCryptoError(alert, message) + err = qerr.NewLocalCryptoError(alert, message) } h.runner.OnError(err) } @@ -457,11 +457,10 @@ func (h *cryptoSetup) handleTransportParameters(data []byte) { // must be called after receiving the transport parameters func (h *cryptoSetup) marshalDataForSessionState() []byte { - buf := &bytes.Buffer{} - quicvarint.Write(buf, clientSessionStateRevision) - quicvarint.Write(buf, uint64(h.rttStats.SmoothedRTT().Microseconds())) - h.peerParams.MarshalForSessionTicket(buf) - return buf.Bytes() + b := make([]byte, 0, 256) + b = quicvarint.Append(b, clientSessionStateRevision) + b = quicvarint.Append(b, uint64(h.rttStats.SmoothedRTT().Microseconds())) + return h.peerParams.MarshalForSessionTicket(b) } func (h *cryptoSetup) handleDataFromSessionState(data []byte) { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/initial_aead.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/initial_aead.go index 0e71b28f4..2fa9810c3 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/initial_aead.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/initial_aead.go @@ -62,7 +62,7 @@ func NewInitialAEAD(connID protocol.ConnectionID, pers protocol.Perspective, v p } func computeSecrets(connID protocol.ConnectionID, v protocol.VersionNumber) (clientSecret, serverSecret []byte) { - initialSecret := hkdf.Extract(crypto.SHA256.New, connID, getSalt(v)) + initialSecret := hkdf.Extract(crypto.SHA256.New, connID.Bytes(), getSalt(v)) clientSecret = hkdfExpandLabel(crypto.SHA256, initialSecret, []byte{}, "client in", crypto.SHA256.Size()) serverSecret = hkdfExpandLabel(crypto.SHA256, initialSecret, []byte{}, "server in", crypto.SHA256.Size()) return diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/session_ticket.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/session_ticket.go index 0a7913fda..fc4a04205 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/session_ticket.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/session_ticket.go @@ -18,11 +18,10 @@ type sessionTicket struct { } func (t *sessionTicket) Marshal() []byte { - b := &bytes.Buffer{} - quicvarint.Write(b, sessionTicketRevision) - quicvarint.Write(b, uint64(t.RTT.Microseconds())) - t.Parameters.MarshalForSessionTicket(b) - return b.Bytes() + b := make([]byte, 0, 256) + b = quicvarint.Append(b, sessionTicketRevision) + b = quicvarint.Append(b, uint64(t.RTT.Microseconds())) + return t.Parameters.MarshalForSessionTicket(b) } func (t *sessionTicket) Unmarshal(b []byte) error { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/token_generator.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/token_generator.go index ee8009720..579871856 100644 --- 
a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/token_generator.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/token_generator.go @@ -65,8 +65,8 @@ func (g *TokenGenerator) NewRetryToken( data, err := asn1.Marshal(token{ IsRetryToken: true, RemoteAddr: encodeRemoteAddr(raddr), - OriginalDestConnectionID: origDestConnID, - RetrySrcConnectionID: retrySrcConnID, + OriginalDestConnectionID: origDestConnID.Bytes(), + RetrySrcConnectionID: retrySrcConnID.Bytes(), Timestamp: time.Now().UnixNano(), }) if err != nil { @@ -112,8 +112,8 @@ func (g *TokenGenerator) DecodeToken(encrypted []byte) (*Token, error) { encodedRemoteAddr: t.RemoteAddr, } if t.IsRetryToken { - token.OriginalDestConnectionID = protocol.ConnectionID(t.OriginalDestConnectionID) - token.RetrySrcConnectionID = protocol.ConnectionID(t.RetrySrcConnectionID) + token.OriginalDestConnectionID = protocol.ParseConnectionID(t.OriginalDestConnectionID) + token.RetrySrcConnectionID = protocol.ParseConnectionID(t.RetrySrcConnectionID) } return token, nil } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/logutils/frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/logutils/frame.go index d40c9083d..2fe66ef04 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/logutils/frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/logutils/frame.go @@ -11,6 +11,10 @@ import ( // Furthermore, it removes the data slices from CRYPTO and STREAM frames. func ConvertFrame(frame wire.Frame) logging.Frame { switch f := frame.(type) { + case *wire.AckFrame: + // We use a pool for ACK frames. + // Implementations of the tracer interface may hold on to frames, so we need to make a copy here. + return ConvertAckFrame(f) case *wire.CryptoFrame: return &logging.CryptoFrame{ Offset: f.Offset, @@ -31,3 +35,16 @@ func ConvertFrame(frame wire.Frame) logging.Frame { return logging.Frame(frame) } } + +func ConvertAckFrame(f *wire.AckFrame) *logging.AckFrame { + ranges := make([]wire.AckRange, 0, len(f.AckRanges)) + ranges = append(ranges, f.AckRanges...) + ack := &logging.AckFrame{ + AckRanges: ranges, + DelayTime: f.DelayTime, + ECNCE: f.ECNCE, + ECT0: f.ECT0, + ECT1: f.ECT1, + } + return ack +} diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/connection_id.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/connection_id.go index 7ae7d9dcf..77259b5fa 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/connection_id.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/connection_id.go @@ -1,24 +1,60 @@ package protocol import ( - "bytes" "crypto/rand" + "errors" "fmt" "io" ) -// A ConnectionID in QUIC -type ConnectionID []byte +var ErrInvalidConnectionIDLen = errors.New("invalid Connection ID length") + +// An ArbitraryLenConnectionID is a QUIC Connection ID able to represent Connection IDs according to RFC 8999. +// Future QUIC versions might allow connection ID lengths up to 255 bytes, while QUIC v1 +// restricts the length to 20 bytes. 
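The connection_id.go rewrite above turns ConnectionID from a byte slice into a value type: a fixed 20-byte array plus a length. That is what lets the Equal calls elsewhere in this patch become plain == comparisons and lets connection IDs be used directly as map keys. An illustrative, standalone sketch of the same representation (not the vendored type):

    package main

    import "fmt"

    const maxConnIDLen = 20

    // connID mirrors the value-type layout: a fixed array plus a length,
    // which makes the type comparable with == and usable as a map key.
    type connID struct {
    	b [maxConnIDLen]byte
    	l uint8
    }

    func parseConnID(b []byte) connID {
    	if len(b) > maxConnIDLen {
    		panic("invalid connection ID length")
    	}
    	var c connID
    	c.l = uint8(len(b))
    	copy(c.b[:], b)
    	return c
    }

    func (c connID) Bytes() []byte { return c.b[:c.l] }

    func main() {
    	a := parseConnID([]byte{0xde, 0xad, 0xbe, 0xef})
    	b := parseConnID([]byte{0xde, 0xad, 0xbe, 0xef})
    	fmt.Println(a == b)              // true: plain comparison, no bytes.Equal
    	seen := map[connID]bool{a: true} // usable directly as a map key
    	fmt.Println(seen[b])             // true
    	fmt.Printf("%x\n", a.Bytes())    // deadbeef
    }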
+type ArbitraryLenConnectionID []byte + +func (c ArbitraryLenConnectionID) Len() int { + return len(c) +} + +func (c ArbitraryLenConnectionID) Bytes() []byte { + return c +} + +func (c ArbitraryLenConnectionID) String() string { + if c.Len() == 0 { + return "(empty)" + } + return fmt.Sprintf("%x", c.Bytes()) +} const maxConnectionIDLen = 20 +// A ConnectionID in QUIC +type ConnectionID struct { + b [20]byte + l uint8 +} + // GenerateConnectionID generates a connection ID using cryptographic random -func GenerateConnectionID(len int) (ConnectionID, error) { - b := make([]byte, len) - if _, err := rand.Read(b); err != nil { - return nil, err +func GenerateConnectionID(l int) (ConnectionID, error) { + var c ConnectionID + c.l = uint8(l) + _, err := rand.Read(c.b[:l]) + return c, err +} + +// ParseConnectionID interprets b as a Connection ID. +// It panics if b is longer than 20 bytes. +func ParseConnectionID(b []byte) ConnectionID { + if len(b) > maxConnectionIDLen { + panic("invalid conn id length") } - return ConnectionID(b), nil + var c ConnectionID + c.l = uint8(len(b)) + copy(c.b[:c.l], b) + return c } // GenerateConnectionIDForInitial generates a connection ID for the Initial packet. @@ -26,39 +62,38 @@ func GenerateConnectionID(len int) (ConnectionID, error) { func GenerateConnectionIDForInitial() (ConnectionID, error) { r := make([]byte, 1) if _, err := rand.Read(r); err != nil { - return nil, err + return ConnectionID{}, err } - len := MinConnectionIDLenInitial + int(r[0])%(maxConnectionIDLen-MinConnectionIDLenInitial+1) - return GenerateConnectionID(len) + l := MinConnectionIDLenInitial + int(r[0])%(maxConnectionIDLen-MinConnectionIDLenInitial+1) + return GenerateConnectionID(l) } // ReadConnectionID reads a connection ID of length len from the given io.Reader. // It returns io.EOF if there are not enough bytes to read. -func ReadConnectionID(r io.Reader, len int) (ConnectionID, error) { - if len == 0 { - return nil, nil +func ReadConnectionID(r io.Reader, l int) (ConnectionID, error) { + var c ConnectionID + if l == 0 { + return c, nil + } + if l > maxConnectionIDLen { + return c, ErrInvalidConnectionIDLen } - c := make(ConnectionID, len) - _, err := io.ReadFull(r, c) + c.l = uint8(l) + _, err := io.ReadFull(r, c.b[:l]) if err == io.ErrUnexpectedEOF { - return nil, io.EOF + return c, io.EOF } return c, err } -// Equal says if two connection IDs are equal -func (c ConnectionID) Equal(other ConnectionID) bool { - return bytes.Equal(c, other) -} - // Len returns the length of the connection ID in bytes func (c ConnectionID) Len() int { - return len(c) + return int(c.l) } // Bytes returns the byte representation func (c ConnectionID) Bytes() []byte { - return []byte(c) + return c.b[:c.l] } func (c ConnectionID) String() string { @@ -72,7 +107,7 @@ type DefaultConnectionIDGenerator struct { ConnLen int } -func (d *DefaultConnectionIDGenerator) GenerateConnectionID() ([]byte, error) { +func (d *DefaultConnectionIDGenerator) GenerateConnectionID() (ConnectionID, error) { return GenerateConnectionID(d.ConnLen) } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/params.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/params.go index 831371139..60c867794 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/params.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/params.go @@ -134,7 +134,7 @@ const MaxAckFrameSize ByteCount = 1000 // MaxDatagramFrameSize is the maximum size of a DATAGRAM frame (RFC 9221). 
// The size is chosen such that a DATAGRAM frame fits into a QUIC packet. -const MaxDatagramFrameSize ByteCount = 1220 +const MaxDatagramFrameSize ByteCount = 1200 // DatagramRcvQueueLen is the length of the receive queue for DATAGRAM frames (RFC 9221) const DatagramRcvQueueLen = 128 diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/qerr/error_codes.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/qerr/error_codes.go index 05ff7834d..5fa580e02 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/qerr/error_codes.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/qerr/error_codes.go @@ -81,7 +81,7 @@ func (e TransportErrorCode) String() string { return "NO_VIABLE_PATH" default: if e.IsCryptoError() { - return fmt.Sprintf("CRYPTO_ERROR (%#x)", uint16(e)) + return fmt.Sprintf("CRYPTO_ERROR %#x", uint16(e)) } return fmt.Sprintf("unknown error code: %#x", uint16(e)) } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/qerr/errors.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/qerr/errors.go index 443254d26..24123df00 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/qerr/errors.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/qerr/errors.go @@ -21,8 +21,8 @@ type TransportError struct { var _ error = &TransportError{} -// NewCryptoError create a new TransportError instance for a crypto error -func NewCryptoError(tlsAlert uint8, errorMessage string) *TransportError { +// NewLocalCryptoError create a new TransportError instance for a crypto error +func NewLocalCryptoError(tlsAlert uint8, errorMessage string) *TransportError { return &TransportError{ ErrorCode: 0x100 + TransportErrorCode(tlsAlert), ErrorMessage: errorMessage, @@ -30,7 +30,7 @@ func NewCryptoError(tlsAlert uint8, errorMessage string) *TransportError { } func (e *TransportError) Error() string { - str := e.ErrorCode.String() + str := fmt.Sprintf("%s (%s)", e.ErrorCode.String(), getRole(e.Remote)) if e.FrameType != 0 { str += fmt.Sprintf(" (frame type: %#x)", e.FrameType) } @@ -68,9 +68,9 @@ var _ error = &ApplicationError{} func (e *ApplicationError) Error() string { if len(e.ErrorMessage) == 0 { - return fmt.Sprintf("Application error %#x", e.ErrorCode) + return fmt.Sprintf("Application error %#x (%s)", e.ErrorCode, getRole(e.Remote)) } - return fmt.Sprintf("Application error %#x: %s", e.ErrorCode, e.ErrorMessage) + return fmt.Sprintf("Application error %#x (%s): %s", e.ErrorCode, getRole(e.Remote), e.ErrorMessage) } type IdleTimeoutError struct{} @@ -122,3 +122,10 @@ func (e *StatelessResetError) Is(target error) bool { func (e *StatelessResetError) Timeout() bool { return false } func (e *StatelessResetError) Temporary() bool { return true } + +func getRole(remote bool) string { + if remote { + return "remote" + } + return "local" +} diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go119.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go119.go index 80e766cda..0a7596936 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go119.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go119.go @@ -1,4 +1,4 @@ -//go:build go1.19 +//go:build go1.19 && !go1.20 package qtls diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go120.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go120.go index f35538729..d5f0c751b 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go120.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go120.go @@ -2,4 +2,109 @@ package qtls -var _ int = "The 
version of quic-go you're using can't be built on Go 1.20 yet. For more details, please see https://github.com/lucas-clemente/quic-go/wiki/quic-go-and-Go-versions." +// [Psiphon] +// For now, use qtls-go1-19 with Go 1.20. Psiphon does not yet require the +// updates in Go 1.20 crypto/tls, and the crypto/tls.Config in Go 1.20 is +// compatible with qtls-go1-19. + +import ( + "crypto" + "crypto/cipher" + "crypto/tls" + "net" + "unsafe" + + "github.com/Psiphon-Labs/qtls-go1-19" +) + +type ( + // Alert is a TLS alert + Alert = qtls.Alert + // A Certificate is qtls.Certificate. + Certificate = qtls.Certificate + // CertificateRequestInfo contains information about a certificate request. + CertificateRequestInfo = qtls.CertificateRequestInfo + // A CipherSuiteTLS13 is a cipher suite for TLS 1.3 + CipherSuiteTLS13 = qtls.CipherSuiteTLS13 + // ClientHelloInfo contains information about a ClientHello. + ClientHelloInfo = qtls.ClientHelloInfo + // ClientSessionCache is a cache used for session resumption. + ClientSessionCache = qtls.ClientSessionCache + // ClientSessionState is a state needed for session resumption. + ClientSessionState = qtls.ClientSessionState + // A Config is a qtls.Config. + Config = qtls.Config + // A Conn is a qtls.Conn. + Conn = qtls.Conn + // ConnectionState contains information about the state of the connection. + ConnectionState = qtls.ConnectionStateWith0RTT + // EncryptionLevel is the encryption level of a message. + EncryptionLevel = qtls.EncryptionLevel + // Extension is a TLS extension + Extension = qtls.Extension + // ExtraConfig is the qtls.ExtraConfig + ExtraConfig = qtls.ExtraConfig + // RecordLayer is a qtls RecordLayer. + RecordLayer = qtls.RecordLayer +) + +const ( + // EncryptionHandshake is the Handshake encryption level + EncryptionHandshake = qtls.EncryptionHandshake + // Encryption0RTT is the 0-RTT encryption level + Encryption0RTT = qtls.Encryption0RTT + // EncryptionApplication is the application data encryption level + EncryptionApplication = qtls.EncryptionApplication +) + +// AEADAESGCMTLS13 creates a new AES-GCM AEAD for TLS 1.3 +func AEADAESGCMTLS13(key, fixedNonce []byte) cipher.AEAD { + return qtls.AEADAESGCMTLS13(key, fixedNonce) +} + +// Client returns a new TLS client side connection. +func Client(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn { + return qtls.Client(conn, config, extraConfig) +} + +// Server returns a new TLS server side connection. +func Server(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn { + return qtls.Server(conn, config, extraConfig) +} + +func GetConnectionState(conn *Conn) ConnectionState { + return conn.ConnectionStateWith0RTT() +} + +// ToTLSConnectionState extracts the tls.ConnectionState +func ToTLSConnectionState(cs ConnectionState) tls.ConnectionState { + return cs.ConnectionState +} + +type cipherSuiteTLS13 struct { + ID uint16 + KeyLen int + AEAD func(key, fixedNonce []byte) cipher.AEAD + Hash crypto.Hash +} + +//go:linkname cipherSuiteTLS13ByID github.com/Psiphon-Labs/qtls-go1-19.cipherSuiteTLS13ByID +func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13 + +// CipherSuiteTLS13ByID gets a TLS 1.3 cipher suite. 
+func CipherSuiteTLS13ByID(id uint16) *CipherSuiteTLS13 { + val := cipherSuiteTLS13ByID(id) + cs := (*cipherSuiteTLS13)(unsafe.Pointer(val)) + return &qtls.CipherSuiteTLS13{ + ID: cs.ID, + KeyLen: cs.KeyLen, + AEAD: cs.AEAD, + Hash: cs.Hash, + } +} + +// [Psiphon] + +func ReadClientHelloRandom(data []byte) ([]byte, error) { + return qtls.ReadClientHelloRandom(data) +} diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/byteorder.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/byteorder.go index d1f528429..a9b715e2f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/byteorder.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/byteorder.go @@ -7,6 +7,10 @@ import ( // A ByteOrder specifies how to convert byte sequences into 16-, 32-, or 64-bit unsigned integers. type ByteOrder interface { + Uint32([]byte) uint32 + Uint24([]byte) uint32 + Uint16([]byte) uint16 + ReadUint32(io.ByteReader) (uint32, error) ReadUint24(io.ByteReader) (uint32, error) ReadUint16(io.ByteReader) (uint16, error) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/byteorder_big_endian.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/byteorder_big_endian.go index d05542e1d..834a711b9 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/byteorder_big_endian.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/byteorder_big_endian.go @@ -2,6 +2,7 @@ package utils import ( "bytes" + "encoding/binary" "io" ) @@ -73,6 +74,19 @@ func (bigEndian) ReadUint16(b io.ByteReader) (uint16, error) { return uint16(b1) + uint16(b2)<<8, nil } +func (bigEndian) Uint32(b []byte) uint32 { + return binary.BigEndian.Uint32(b) +} + +func (bigEndian) Uint24(b []byte) uint32 { + _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 +} + +func (bigEndian) Uint16(b []byte) uint16 { + return binary.BigEndian.Uint16(b) +} + // WriteUint32 writes a uint32 func (bigEndian) WriteUint32(b *bytes.Buffer, i uint32) { b.Write([]byte{uint8(i >> 24), uint8(i >> 16), uint8(i >> 8), uint8(i)}) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/ack_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/ack_frame.go index cb92da8e9..b1b5a282f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/ack_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/ack_frame.go @@ -29,7 +29,7 @@ func parseAckFrame(r *bytes.Reader, ackDelayExponent uint8, _ protocol.VersionNu } ecn := typeByte&0x1 > 0 - frame := &AckFrame{} + frame := GetAckFrame() la, err := quicvarint.Read(r) if err != nil { @@ -106,41 +106,41 @@ func parseAckFrame(r *bytes.Reader, ackDelayExponent uint8, _ protocol.VersionNu return frame, nil } -// Write writes an ACK frame. -func (f *AckFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { +// Append appends an ACK frame. 
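// Illustrative sketch, not part of the patch: every frame in this change moves
// from Write(*bytes.Buffer) to Append([]byte), which returns the extended
// slice so the packet packer can serialize all frames into one reusable
// buffer. The minimalFrame type below is a hypothetical stand-in for any
// frame; only the shape of the API is taken from the diff.
type minimalFrame struct{ typeByte byte }

func (f *minimalFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
	return append(b, f.typeByte), nil
}

// A caller chains appends into a single buffer, e.g.:
//
//	buf := make([]byte, 0, 1280)
//	buf, _ = (&minimalFrame{typeByte: 0x1}).Append(buf, version)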
+func (f *AckFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { hasECN := f.ECT0 > 0 || f.ECT1 > 0 || f.ECNCE > 0 if hasECN { - b.WriteByte(0x3) + b = append(b, 0b11) } else { - b.WriteByte(0x2) + b = append(b, 0b10) } - quicvarint.Write(b, uint64(f.LargestAcked())) - quicvarint.Write(b, encodeAckDelay(f.DelayTime)) + b = quicvarint.Append(b, uint64(f.LargestAcked())) + b = quicvarint.Append(b, encodeAckDelay(f.DelayTime)) numRanges := f.numEncodableAckRanges() - quicvarint.Write(b, uint64(numRanges-1)) + b = quicvarint.Append(b, uint64(numRanges-1)) // write the first range _, firstRange := f.encodeAckRange(0) - quicvarint.Write(b, firstRange) + b = quicvarint.Append(b, firstRange) // write all the other range for i := 1; i < numRanges; i++ { gap, len := f.encodeAckRange(i) - quicvarint.Write(b, gap) - quicvarint.Write(b, len) + b = quicvarint.Append(b, gap) + b = quicvarint.Append(b, len) } if hasECN { - quicvarint.Write(b, f.ECT0) - quicvarint.Write(b, f.ECT1) - quicvarint.Write(b, f.ECNCE) + b = quicvarint.Append(b, f.ECT0) + b = quicvarint.Append(b, f.ECT1) + b = quicvarint.Append(b, f.ECNCE) } - return nil + return b, nil } // Length of a written frame -func (f *AckFrame) Length(version protocol.VersionNumber) protocol.ByteCount { +func (f *AckFrame) Length(_ protocol.VersionNumber) protocol.ByteCount { largestAcked := f.AckRanges[0].Largest numRanges := f.numEncodableAckRanges() diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/ack_frame_pool.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/ack_frame_pool.go new file mode 100644 index 000000000..a0c6a21d7 --- /dev/null +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/ack_frame_pool.go @@ -0,0 +1,24 @@ +package wire + +import "sync" + +var ackFramePool = sync.Pool{New: func() any { + return &AckFrame{} +}} + +func GetAckFrame() *AckFrame { + f := ackFramePool.Get().(*AckFrame) + f.AckRanges = f.AckRanges[:0] + f.ECNCE = 0 + f.ECT0 = 0 + f.ECT1 = 0 + f.DelayTime = 0 + return f +} + +func PutAckFrame(f *AckFrame) { + if cap(f.AckRanges) > 4 { + return + } + ackFramePool.Put(f) +} diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/connection_close_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/connection_close_frame.go index 8ccdff954..098b140bd 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/connection_close_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/connection_close_frame.go @@ -66,18 +66,18 @@ func (f *ConnectionCloseFrame) Length(protocol.VersionNumber) protocol.ByteCount return length } -func (f *ConnectionCloseFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { +func (f *ConnectionCloseFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { if f.IsApplicationError { - b.WriteByte(0x1d) + b = append(b, 0x1d) } else { - b.WriteByte(0x1c) + b = append(b, 0x1c) } - quicvarint.Write(b, f.ErrorCode) + b = quicvarint.Append(b, f.ErrorCode) if !f.IsApplicationError { - quicvarint.Write(b, f.FrameType) + b = quicvarint.Append(b, f.FrameType) } - quicvarint.Write(b, uint64(len(f.ReasonPhrase))) - b.WriteString(f.ReasonPhrase) - return nil + b = quicvarint.Append(b, uint64(len(f.ReasonPhrase))) + b = append(b, []byte(f.ReasonPhrase)...) 
+ return b, nil } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/crypto_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/crypto_frame.go index dc5dc0ab1..0d55387ca 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/crypto_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/crypto_frame.go @@ -42,12 +42,12 @@ func parseCryptoFrame(r *bytes.Reader, _ protocol.VersionNumber) (*CryptoFrame, return frame, nil } -func (f *CryptoFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x6) - quicvarint.Write(b, uint64(f.Offset)) - quicvarint.Write(b, uint64(len(f.Data))) - b.Write(f.Data) - return nil +func (f *CryptoFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { + b = append(b, 0x6) + b = quicvarint.Append(b, uint64(f.Offset)) + b = quicvarint.Append(b, uint64(len(f.Data))) + b = append(b, f.Data...) + return b, nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/data_blocked_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/data_blocked_frame.go index c3bb3ab9d..afdc6ef39 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/data_blocked_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/data_blocked_frame.go @@ -25,11 +25,10 @@ func parseDataBlockedFrame(r *bytes.Reader, _ protocol.VersionNumber) (*DataBloc }, nil } -func (f *DataBlockedFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - typeByte := uint8(0x14) - b.WriteByte(typeByte) - quicvarint.Write(b, uint64(f.MaximumData)) - return nil +func (f *DataBlockedFrame) Append(b []byte, version protocol.VersionNumber) ([]byte, error) { + b = append(b, 0x14) + b = quicvarint.Append(b, uint64(f.MaximumData)) + return b, nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/datagram_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/datagram_frame.go index c7ef15de0..f21fb3e94 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/datagram_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/datagram_frame.go @@ -44,17 +44,17 @@ func parseDatagramFrame(r *bytes.Reader, _ protocol.VersionNumber) (*DatagramFra return f, nil } -func (f *DatagramFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { +func (f *DatagramFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { typeByte := uint8(0x30) if f.DataLenPresent { - typeByte ^= 0x1 + typeByte ^= 0b1 } - b.WriteByte(typeByte) + b = append(b, typeByte) if f.DataLenPresent { - quicvarint.Write(b, uint64(len(f.Data))) + b = quicvarint.Append(b, uint64(len(f.Data))) } - b.Write(f.Data) - return nil + b = append(b, f.Data...) + return b, nil } // MaxDataLen returns the maximum data length diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/frame_parser.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/frame_parser.go index 80f561b2f..3bebb7249 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/frame_parser.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/frame_parser.go @@ -11,6 +11,8 @@ import ( ) type frameParser struct { + r bytes.Reader // cached bytes.Reader, so we don't have to repeatedly allocate them + ackDelayExponent uint8 supportsDatagrams bool @@ -21,6 +23,7 @@ type frameParser struct { // NewFrameParser creates a new frame parser. 
func NewFrameParser(supportsDatagrams bool, v protocol.VersionNumber) FrameParser { return &frameParser{ + r: *bytes.NewReader(nil), supportsDatagrams: supportsDatagrams, version: v, } @@ -28,9 +31,18 @@ func NewFrameParser(supportsDatagrams bool, v protocol.VersionNumber) FrameParse // ParseNext parses the next frame. // It skips PADDING frames. -func (p *frameParser) ParseNext(r *bytes.Reader, encLevel protocol.EncryptionLevel) (Frame, error) { +func (p *frameParser) ParseNext(data []byte, encLevel protocol.EncryptionLevel) (int, Frame, error) { + startLen := len(data) + p.r.Reset(data) + frame, err := p.parseNext(&p.r, encLevel) + n := startLen - p.r.Len() + p.r.Reset(nil) + return n, frame, err +} + +func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLevel) (Frame, error) { for r.Len() != 0 { - typeByte, _ := r.ReadByte() + typeByte, _ := p.r.ReadByte() if typeByte == 0x0 { // PADDING frame continue } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/handshake_done_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/handshake_done_frame.go index ebd24c4f1..cdc12c9b0 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/handshake_done_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/handshake_done_frame.go @@ -17,9 +17,8 @@ func parseHandshakeDoneFrame(r *bytes.Reader, _ protocol.VersionNumber) (*Handsh return &HandshakeDoneFrame{}, nil } -func (f *HandshakeDoneFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x1e) - return nil +func (f *HandshakeDoneFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { + return append(b, 0x1e), nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/header.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/header.go index 9b26b39a9..07012ade6 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/header.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/header.go @@ -17,22 +17,63 @@ import ( // That means that the connection ID must not be used after the packet buffer is released. func ParseConnectionID(data []byte, shortHeaderConnIDLen int) (protocol.ConnectionID, error) { if len(data) == 0 { - return nil, io.EOF + return protocol.ConnectionID{}, io.EOF } if !IsLongHeaderPacket(data[0]) { if len(data) < shortHeaderConnIDLen+1 { - return nil, io.EOF + return protocol.ConnectionID{}, io.EOF } - return protocol.ConnectionID(data[1 : 1+shortHeaderConnIDLen]), nil + return protocol.ParseConnectionID(data[1 : 1+shortHeaderConnIDLen]), nil } if len(data) < 6 { - return nil, io.EOF + return protocol.ConnectionID{}, io.EOF } destConnIDLen := int(data[5]) + if destConnIDLen > protocol.MaxConnIDLen { + return protocol.ConnectionID{}, protocol.ErrInvalidConnectionIDLen + } if len(data) < 6+destConnIDLen { - return nil, io.EOF + return protocol.ConnectionID{}, io.EOF + } + return protocol.ParseConnectionID(data[6 : 6+destConnIDLen]), nil +} + +// ParseArbitraryLenConnectionIDs parses the most general form of a Long Header packet, +// using only the version-independent packet format as described in Section 5.1 of RFC 8999: +// https://datatracker.ietf.org/doc/html/rfc8999#section-5.1. +// This function should only be called on Long Header packets for which we don't support the version. 
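// Illustrative sketch, not part of the patch: per RFC 8999, Section 5.1, the
// first byte, the 4-byte version, and both length-prefixed connection IDs are
// version independent, so connection IDs can still be recovered from packets
// whose version is unsupported. isSupported is a hypothetical callback.
func connIDsForUnsupportedVersion(data []byte, isSupported func(protocol.VersionNumber) bool) (dest, src protocol.ArbitraryLenConnectionID, ok bool) {
	if len(data) == 0 || !IsLongHeaderPacket(data[0]) {
		return nil, nil, false
	}
	v, err := ParseVersion(data)
	if err != nil || isSupported(v) {
		return nil, nil, false
	}
	if _, dest, src, err = ParseArbitraryLenConnectionIDs(data); err != nil {
		return nil, nil, false
	}
	return dest, src, true
}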
+func ParseArbitraryLenConnectionIDs(data []byte) (bytesParsed int, dest, src protocol.ArbitraryLenConnectionID, _ error) { + r := bytes.NewReader(data) + remaining := r.Len() + src, dest, err := parseArbitraryLenConnectionIDs(r) + return remaining - r.Len(), src, dest, err +} + +func parseArbitraryLenConnectionIDs(r *bytes.Reader) (dest, src protocol.ArbitraryLenConnectionID, _ error) { + r.Seek(5, io.SeekStart) // skip first byte and version field + destConnIDLen, err := r.ReadByte() + if err != nil { + return nil, nil, err + } + destConnID := make(protocol.ArbitraryLenConnectionID, destConnIDLen) + if _, err := io.ReadFull(r, destConnID); err != nil { + if err == io.ErrUnexpectedEOF { + err = io.EOF + } + return nil, nil, err + } + srcConnIDLen, err := r.ReadByte() + if err != nil { + return nil, nil, err + } + srcConnID := make(protocol.ArbitraryLenConnectionID, srcConnIDLen) + if _, err := io.ReadFull(r, srcConnID); err != nil { + if err == io.ErrUnexpectedEOF { + err = io.EOF + } + return nil, nil, err } - return protocol.ConnectionID(data[6 : 6+destConnIDLen]), nil + return destConnID, srcConnID, nil } // IsLongHeaderPacket says if this is a Long Header packet @@ -40,6 +81,15 @@ func IsLongHeaderPacket(firstByte byte) bool { return firstByte&0x80 > 0 } +// ParseVersion parses the QUIC version. +// It should only be called for Long Header packets (Short Header packets don't contain a version number). +func ParseVersion(data []byte) (protocol.VersionNumber, error) { + if len(data) < 5 { + return 0, io.EOF + } + return protocol.VersionNumber(binary.BigEndian.Uint32(data[1:5])), nil +} + // IsVersionNegotiationPacket says if this is a version negotiation packet func IsVersionNegotiationPacket(b []byte) bool { if len(b) < 5 { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/interface.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/interface.go index f58f8794d..c9c27744f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/interface.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/interface.go @@ -1,19 +1,17 @@ package wire import ( - "bytes" - "github.com/Psiphon-Labs/quic-go/internal/protocol" ) // A Frame in QUIC type Frame interface { - Write(b *bytes.Buffer, version protocol.VersionNumber) error + Append(b []byte, version protocol.VersionNumber) ([]byte, error) Length(version protocol.VersionNumber) protocol.ByteCount } // A FrameParser parses QUIC frames, one by one. 
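// Illustrative sketch, not part of the patch: with the slice-based ParseNext
// below, callers advance through a decrypted payload using the returned byte
// count; a nil frame with a nil error means only PADDING remained.
// forEachFrame is a hypothetical helper showing the intended calling pattern.
func forEachFrame(p FrameParser, payload []byte, encLevel protocol.EncryptionLevel, handle func(Frame)) error {
	for len(payload) > 0 {
		n, frame, err := p.ParseNext(payload, encLevel)
		if err != nil {
			return err
		}
		payload = payload[n:]
		if frame == nil {
			break
		}
		handle(frame)
	}
	return nil
}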
type FrameParser interface { - ParseNext(*bytes.Reader, protocol.EncryptionLevel) (Frame, error) + ParseNext([]byte, protocol.EncryptionLevel) (int, Frame, error) SetAckDelayExponent(uint8) } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/max_data_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/max_data_frame.go index 307f7c6e5..c7893e855 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/max_data_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/max_data_frame.go @@ -28,13 +28,13 @@ func parseMaxDataFrame(r *bytes.Reader, _ protocol.VersionNumber) (*MaxDataFrame } // Write writes a MAX_STREAM_DATA frame -func (f *MaxDataFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - b.WriteByte(0x10) - quicvarint.Write(b, uint64(f.MaximumData)) - return nil +func (f *MaxDataFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { + b = append(b, 0x10) + b = quicvarint.Append(b, uint64(f.MaximumData)) + return b, nil } // Length of a written frame -func (f *MaxDataFrame) Length(version protocol.VersionNumber) protocol.ByteCount { +func (f *MaxDataFrame) Length(_ protocol.VersionNumber) protocol.ByteCount { return 1 + quicvarint.Len(uint64(f.MaximumData)) } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/max_stream_data_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/max_stream_data_frame.go index 8b13a6582..ba996ad36 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/max_stream_data_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/max_stream_data_frame.go @@ -33,11 +33,11 @@ func parseMaxStreamDataFrame(r *bytes.Reader, _ protocol.VersionNumber) (*MaxStr }, nil } -func (f *MaxStreamDataFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - b.WriteByte(0x11) - quicvarint.Write(b, uint64(f.StreamID)) - quicvarint.Write(b, uint64(f.MaximumStreamData)) - return nil +func (f *MaxStreamDataFrame) Append(b []byte, version protocol.VersionNumber) ([]byte, error) { + b = append(b, 0x11) + b = quicvarint.Append(b, uint64(f.StreamID)) + b = quicvarint.Append(b, uint64(f.MaximumStreamData)) + return b, nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/max_streams_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/max_streams_frame.go index 2baff9167..e51566546 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/max_streams_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/max_streams_frame.go @@ -38,15 +38,15 @@ func parseMaxStreamsFrame(r *bytes.Reader, _ protocol.VersionNumber) (*MaxStream return f, nil } -func (f *MaxStreamsFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { +func (f *MaxStreamsFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { switch f.Type { case protocol.StreamTypeBidi: - b.WriteByte(0x12) + b = append(b, 0x12) case protocol.StreamTypeUni: - b.WriteByte(0x13) + b = append(b, 0x13) } - quicvarint.Write(b, uint64(f.MaxStreamNum)) - return nil + b = quicvarint.Append(b, uint64(f.MaxStreamNum)) + return b, nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/new_connection_id_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/new_connection_id_frame.go index eaa3a7c18..6b8d6762a 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/new_connection_id_frame.go +++ 
b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/new_connection_id_frame.go @@ -38,9 +38,6 @@ func parseNewConnectionIDFrame(r *bytes.Reader, _ protocol.VersionNumber) (*NewC if err != nil { return nil, err } - if connIDLen > protocol.MaxConnIDLen { - return nil, fmt.Errorf("invalid connection ID length: %d", connIDLen) - } connID, err := protocol.ReadConnectionID(r, int(connIDLen)) if err != nil { return nil, err @@ -60,18 +57,18 @@ func parseNewConnectionIDFrame(r *bytes.Reader, _ protocol.VersionNumber) (*NewC return frame, nil } -func (f *NewConnectionIDFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x18) - quicvarint.Write(b, f.SequenceNumber) - quicvarint.Write(b, f.RetirePriorTo) +func (f *NewConnectionIDFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { + b = append(b, 0x18) + b = quicvarint.Append(b, f.SequenceNumber) + b = quicvarint.Append(b, f.RetirePriorTo) connIDLen := f.ConnectionID.Len() if connIDLen > protocol.MaxConnIDLen { - return fmt.Errorf("invalid connection ID length: %d", connIDLen) + return nil, fmt.Errorf("invalid connection ID length: %d", connIDLen) } - b.WriteByte(uint8(connIDLen)) - b.Write(f.ConnectionID.Bytes()) - b.Write(f.StatelessResetToken[:]) - return nil + b = append(b, uint8(connIDLen)) + b = append(b, f.ConnectionID.Bytes()...) + b = append(b, f.StatelessResetToken[:]...) + return b, nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/new_token_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/new_token_frame.go index 0457ddda7..850694f98 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/new_token_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/new_token_frame.go @@ -35,11 +35,11 @@ func parseNewTokenFrame(r *bytes.Reader, _ protocol.VersionNumber) (*NewTokenFra return &NewTokenFrame{Token: token}, nil } -func (f *NewTokenFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x7) - quicvarint.Write(b, uint64(len(f.Token))) - b.Write(f.Token) - return nil +func (f *NewTokenFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { + b = append(b, 0x7) + b = quicvarint.Append(b, uint64(len(f.Token))) + b = append(b, f.Token...) + return b, nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/path_challenge_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/path_challenge_frame.go index cb9bc5de3..a8bef17de 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/path_challenge_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/path_challenge_frame.go @@ -26,10 +26,10 @@ func parsePathChallengeFrame(r *bytes.Reader, _ protocol.VersionNumber) (*PathCh return frame, nil } -func (f *PathChallengeFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x1a) - b.Write(f.Data[:]) - return nil +func (f *PathChallengeFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { + b = append(b, 0x1a) + b = append(b, f.Data[:]...) 
+ return b, nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/path_response_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/path_response_frame.go index 9e4bb62a9..5470a8365 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/path_response_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/path_response_frame.go @@ -26,10 +26,10 @@ func parsePathResponseFrame(r *bytes.Reader, _ protocol.VersionNumber) (*PathRes return frame, nil } -func (f *PathResponseFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x1b) - b.Write(f.Data[:]) - return nil +func (f *PathResponseFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { + b = append(b, 0x1b) + b = append(b, f.Data[:]...) + return b, nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/ping_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/ping_frame.go index f8daabd53..8b10b1ced 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/ping_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/ping_frame.go @@ -16,12 +16,11 @@ func parsePingFrame(r *bytes.Reader, _ protocol.VersionNumber) (*PingFrame, erro return &PingFrame{}, nil } -func (f *PingFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - b.WriteByte(0x1) - return nil +func (f *PingFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { + return append(b, 0x1), nil } // Length of a written frame -func (f *PingFrame) Length(version protocol.VersionNumber) protocol.ByteCount { +func (f *PingFrame) Length(_ protocol.VersionNumber) protocol.ByteCount { return 1 } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/reset_stream_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/reset_stream_frame.go index cbc65caa1..8229ccb6a 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/reset_stream_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/reset_stream_frame.go @@ -44,12 +44,12 @@ func parseResetStreamFrame(r *bytes.Reader, _ protocol.VersionNumber) (*ResetStr }, nil } -func (f *ResetStreamFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x4) - quicvarint.Write(b, uint64(f.StreamID)) - quicvarint.Write(b, uint64(f.ErrorCode)) - quicvarint.Write(b, uint64(f.FinalSize)) - return nil +func (f *ResetStreamFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { + b = append(b, 0x4) + b = quicvarint.Append(b, uint64(f.StreamID)) + b = quicvarint.Append(b, uint64(f.ErrorCode)) + b = quicvarint.Append(b, uint64(f.FinalSize)) + return b, nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/retire_connection_id_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/retire_connection_id_frame.go index 2a71c3eb7..08ff7651e 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/retire_connection_id_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/retire_connection_id_frame.go @@ -24,10 +24,10 @@ func parseRetireConnectionIDFrame(r *bytes.Reader, _ protocol.VersionNumber) (*R return &RetireConnectionIDFrame{SequenceNumber: seq}, nil } -func (f *RetireConnectionIDFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x19) - quicvarint.Write(b, f.SequenceNumber) - return nil +func (f *RetireConnectionIDFrame) Append(b []byte, _ 
protocol.VersionNumber) ([]byte, error) { + b = append(b, 0x19) + b = quicvarint.Append(b, f.SequenceNumber) + return b, nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/short_header.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/short_header.go new file mode 100644 index 000000000..9d241e413 --- /dev/null +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/short_header.go @@ -0,0 +1,55 @@ +package wire + +import ( + "errors" + "fmt" + "io" + + "github.com/Psiphon-Labs/quic-go/internal/protocol" + "github.com/Psiphon-Labs/quic-go/internal/utils" +) + +func ParseShortHeader(data []byte, connIDLen int) (length int, _ protocol.PacketNumber, _ protocol.PacketNumberLen, _ protocol.KeyPhaseBit, _ error) { + if len(data) == 0 { + return 0, 0, 0, 0, io.EOF + } + if data[0]&0x80 > 0 { + return 0, 0, 0, 0, errors.New("not a short header packet") + } + if data[0]&0x40 == 0 { + return 0, 0, 0, 0, errors.New("not a QUIC packet") + } + pnLen := protocol.PacketNumberLen(data[0]&0b11) + 1 + if len(data) < 1+int(pnLen)+connIDLen { + return 0, 0, 0, 0, io.EOF + } + + pos := 1 + connIDLen + var pn protocol.PacketNumber + switch pnLen { + case protocol.PacketNumberLen1: + pn = protocol.PacketNumber(data[pos]) + case protocol.PacketNumberLen2: + pn = protocol.PacketNumber(utils.BigEndian.Uint16(data[pos : pos+2])) + case protocol.PacketNumberLen3: + pn = protocol.PacketNumber(utils.BigEndian.Uint24(data[pos : pos+3])) + case protocol.PacketNumberLen4: + pn = protocol.PacketNumber(utils.BigEndian.Uint32(data[pos : pos+4])) + default: + return 0, 0, 0, 0, fmt.Errorf("invalid packet number length: %d", pnLen) + } + kp := protocol.KeyPhaseZero + if data[0]&0b100 > 0 { + kp = protocol.KeyPhaseOne + } + + var err error + if data[0]&0x18 != 0 { + err = ErrInvalidReservedBits + } + return 1 + connIDLen + int(pnLen), pn, pnLen, kp, err +} + +func LogShortHeader(logger utils.Logger, dest protocol.ConnectionID, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen, kp protocol.KeyPhaseBit) { + logger.Debugf("\tShort Header{DestConnectionID: %s, PacketNumber: %d, PacketNumberLen: %d, KeyPhase: %s}", dest, pn, pnLen, kp) +} diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/stop_sending_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/stop_sending_frame.go index 31bd6d237..613e49b7f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/stop_sending_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/stop_sending_frame.go @@ -40,9 +40,9 @@ func (f *StopSendingFrame) Length(_ protocol.VersionNumber) protocol.ByteCount { return 1 + quicvarint.Len(uint64(f.StreamID)) + quicvarint.Len(uint64(f.ErrorCode)) } -func (f *StopSendingFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x5) - quicvarint.Write(b, uint64(f.StreamID)) - quicvarint.Write(b, uint64(f.ErrorCode)) - return nil +func (f *StopSendingFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { + b = append(b, 0x5) + b = quicvarint.Append(b, uint64(f.StreamID)) + b = quicvarint.Append(b, uint64(f.ErrorCode)) + return b, nil } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/stream_data_blocked_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/stream_data_blocked_frame.go index 21a2401f1..2bf2f7c6f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/stream_data_blocked_frame.go +++ 
b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/stream_data_blocked_frame.go @@ -33,11 +33,11 @@ func parseStreamDataBlockedFrame(r *bytes.Reader, _ protocol.VersionNumber) (*St }, nil } -func (f *StreamDataBlockedFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - b.WriteByte(0x15) - quicvarint.Write(b, uint64(f.StreamID)) - quicvarint.Write(b, uint64(f.MaximumStreamData)) - return nil +func (f *StreamDataBlockedFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { + b = append(b, 0x15) + b = quicvarint.Append(b, uint64(f.StreamID)) + b = quicvarint.Append(b, uint64(f.MaximumStreamData)) + return b, nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/stream_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/stream_frame.go index 2b107c377..2ee689c3f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/stream_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/stream_frame.go @@ -26,9 +26,9 @@ func parseStreamFrame(r *bytes.Reader, _ protocol.VersionNumber) (*StreamFrame, return nil, err } - hasOffset := typeByte&0x4 > 0 - fin := typeByte&0x1 > 0 - hasDataLen := typeByte&0x2 > 0 + hasOffset := typeByte&0b100 > 0 + fin := typeByte&0b1 > 0 + hasDataLen := typeByte&0b10 > 0 streamID, err := quicvarint.Read(r) if err != nil { @@ -84,32 +84,32 @@ func parseStreamFrame(r *bytes.Reader, _ protocol.VersionNumber) (*StreamFrame, } // Write writes a STREAM frame -func (f *StreamFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { +func (f *StreamFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { if len(f.Data) == 0 && !f.Fin { - return errors.New("StreamFrame: attempting to write empty frame without FIN") + return nil, errors.New("StreamFrame: attempting to write empty frame without FIN") } typeByte := byte(0x8) if f.Fin { - typeByte ^= 0x1 + typeByte ^= 0b1 } hasOffset := f.Offset != 0 if f.DataLenPresent { - typeByte ^= 0x2 + typeByte ^= 0b10 } if hasOffset { - typeByte ^= 0x4 + typeByte ^= 0b100 } - b.WriteByte(typeByte) - quicvarint.Write(b, uint64(f.StreamID)) + b = append(b, typeByte) + b = quicvarint.Append(b, uint64(f.StreamID)) if hasOffset { - quicvarint.Write(b, uint64(f.Offset)) + b = quicvarint.Append(b, uint64(f.Offset)) } if f.DataLenPresent { - quicvarint.Write(b, uint64(f.DataLen())) + b = quicvarint.Append(b, uint64(f.DataLen())) } - b.Write(f.Data) - return nil + b = append(b, f.Data...) 
+ return b, nil } // Length returns the total length of the STREAM frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/streams_blocked_frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/streams_blocked_frame.go index 299717834..3cf19c9f0 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/streams_blocked_frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/streams_blocked_frame.go @@ -38,15 +38,15 @@ func parseStreamsBlockedFrame(r *bytes.Reader, _ protocol.VersionNumber) (*Strea return f, nil } -func (f *StreamsBlockedFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { +func (f *StreamsBlockedFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) { switch f.Type { case protocol.StreamTypeBidi: - b.WriteByte(0x16) + b = append(b, 0x16) case protocol.StreamTypeUni: - b.WriteByte(0x17) + b = append(b, 0x17) } - quicvarint.Write(b, uint64(f.StreamLimit)) - return nil + b = quicvarint.Append(b, uint64(f.StreamLimit)) + return b, nil } // Length of a written frame diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/transport_parameters.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/transport_parameters.go index a8673204e..d9ac5d391 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/transport_parameters.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/transport_parameters.go @@ -2,6 +2,7 @@ package wire import ( "bytes" + "encoding/binary" "errors" "fmt" "io" @@ -321,88 +322,92 @@ func (p *TransportParameters) Marshal(pers protocol.Perspective, clientHelloPRNG return p.marshalClientRandomized(clientHelloPRNG) } - b := &bytes.Buffer{} + // Typical Transport Parameters consume around 110 bytes, depending on the exact values, + // especially the lengths of the Connection IDs. + // Allocate 256 bytes, so we won't have to grow the slice in any case. 
+ b := make([]byte, 0, 256) // add a greased value - quicvarint.Write(b, uint64(27+31*rand.Intn(100))) + b = quicvarint.Append(b, uint64(27+31*rand.Intn(100))) length := rand.Intn(16) - randomData := make([]byte, length) - rand.Read(randomData) - quicvarint.Write(b, uint64(length)) - b.Write(randomData) + b = quicvarint.Append(b, uint64(length)) + b = b[:len(b)+length] + rand.Read(b[len(b)-length:]) // initial_max_stream_data_bidi_local - p.marshalVarintParam(b, initialMaxStreamDataBidiLocalParameterID, uint64(p.InitialMaxStreamDataBidiLocal)) + b = p.marshalVarintParam(b, initialMaxStreamDataBidiLocalParameterID, uint64(p.InitialMaxStreamDataBidiLocal)) // initial_max_stream_data_bidi_remote - p.marshalVarintParam(b, initialMaxStreamDataBidiRemoteParameterID, uint64(p.InitialMaxStreamDataBidiRemote)) + b = p.marshalVarintParam(b, initialMaxStreamDataBidiRemoteParameterID, uint64(p.InitialMaxStreamDataBidiRemote)) // initial_max_stream_data_uni - p.marshalVarintParam(b, initialMaxStreamDataUniParameterID, uint64(p.InitialMaxStreamDataUni)) + b = p.marshalVarintParam(b, initialMaxStreamDataUniParameterID, uint64(p.InitialMaxStreamDataUni)) // initial_max_data - p.marshalVarintParam(b, initialMaxDataParameterID, uint64(p.InitialMaxData)) + b = p.marshalVarintParam(b, initialMaxDataParameterID, uint64(p.InitialMaxData)) // initial_max_bidi_streams - p.marshalVarintParam(b, initialMaxStreamsBidiParameterID, uint64(p.MaxBidiStreamNum)) + b = p.marshalVarintParam(b, initialMaxStreamsBidiParameterID, uint64(p.MaxBidiStreamNum)) // initial_max_uni_streams - p.marshalVarintParam(b, initialMaxStreamsUniParameterID, uint64(p.MaxUniStreamNum)) + b = p.marshalVarintParam(b, initialMaxStreamsUniParameterID, uint64(p.MaxUniStreamNum)) // idle_timeout - p.marshalVarintParam(b, maxIdleTimeoutParameterID, uint64(p.MaxIdleTimeout/time.Millisecond)) + b = p.marshalVarintParam(b, maxIdleTimeoutParameterID, uint64(p.MaxIdleTimeout/time.Millisecond)) // max_packet_size - p.marshalVarintParam(b, maxUDPPayloadSizeParameterID, uint64(protocol.MaxPacketBufferSize)) + b = p.marshalVarintParam(b, maxUDPPayloadSizeParameterID, uint64(protocol.MaxPacketBufferSize)) // max_ack_delay // Only send it if is different from the default value. if p.MaxAckDelay != protocol.DefaultMaxAckDelay { - p.marshalVarintParam(b, maxAckDelayParameterID, uint64(p.MaxAckDelay/time.Millisecond)) + b = p.marshalVarintParam(b, maxAckDelayParameterID, uint64(p.MaxAckDelay/time.Millisecond)) } // ack_delay_exponent // Only send it if is different from the default value. if p.AckDelayExponent != protocol.DefaultAckDelayExponent { - p.marshalVarintParam(b, ackDelayExponentParameterID, uint64(p.AckDelayExponent)) + b = p.marshalVarintParam(b, ackDelayExponentParameterID, uint64(p.AckDelayExponent)) } // disable_active_migration if p.DisableActiveMigration { - quicvarint.Write(b, uint64(disableActiveMigrationParameterID)) - quicvarint.Write(b, 0) + b = quicvarint.Append(b, uint64(disableActiveMigrationParameterID)) + b = quicvarint.Append(b, 0) } if pers == protocol.PerspectiveServer { // stateless_reset_token if p.StatelessResetToken != nil { - quicvarint.Write(b, uint64(statelessResetTokenParameterID)) - quicvarint.Write(b, 16) - b.Write(p.StatelessResetToken[:]) + b = quicvarint.Append(b, uint64(statelessResetTokenParameterID)) + b = quicvarint.Append(b, 16) + b = append(b, p.StatelessResetToken[:]...) 
} // original_destination_connection_id - quicvarint.Write(b, uint64(originalDestinationConnectionIDParameterID)) - quicvarint.Write(b, uint64(p.OriginalDestinationConnectionID.Len())) - b.Write(p.OriginalDestinationConnectionID.Bytes()) + b = quicvarint.Append(b, uint64(originalDestinationConnectionIDParameterID)) + b = quicvarint.Append(b, uint64(p.OriginalDestinationConnectionID.Len())) + b = append(b, p.OriginalDestinationConnectionID.Bytes()...) // preferred_address if p.PreferredAddress != nil { - quicvarint.Write(b, uint64(preferredAddressParameterID)) - quicvarint.Write(b, 4+2+16+2+1+uint64(p.PreferredAddress.ConnectionID.Len())+16) + b = quicvarint.Append(b, uint64(preferredAddressParameterID)) + b = quicvarint.Append(b, 4+2+16+2+1+uint64(p.PreferredAddress.ConnectionID.Len())+16) ipv4 := p.PreferredAddress.IPv4 - b.Write(ipv4[len(ipv4)-4:]) - utils.BigEndian.WriteUint16(b, p.PreferredAddress.IPv4Port) - b.Write(p.PreferredAddress.IPv6) - utils.BigEndian.WriteUint16(b, p.PreferredAddress.IPv6Port) - b.WriteByte(uint8(p.PreferredAddress.ConnectionID.Len())) - b.Write(p.PreferredAddress.ConnectionID.Bytes()) - b.Write(p.PreferredAddress.StatelessResetToken[:]) + b = append(b, ipv4[len(ipv4)-4:]...) + b = append(b, []byte{0, 0}...) + binary.BigEndian.PutUint16(b[len(b)-2:], p.PreferredAddress.IPv4Port) + b = append(b, p.PreferredAddress.IPv6...) + b = append(b, []byte{0, 0}...) + binary.BigEndian.PutUint16(b[len(b)-2:], p.PreferredAddress.IPv6Port) + b = append(b, uint8(p.PreferredAddress.ConnectionID.Len())) + b = append(b, p.PreferredAddress.ConnectionID.Bytes()...) + b = append(b, p.PreferredAddress.StatelessResetToken[:]...) } } // active_connection_id_limit - p.marshalVarintParam(b, activeConnectionIDLimitParameterID, p.ActiveConnectionIDLimit) + b = p.marshalVarintParam(b, activeConnectionIDLimitParameterID, p.ActiveConnectionIDLimit) // initial_source_connection_id - quicvarint.Write(b, uint64(initialSourceConnectionIDParameterID)) - quicvarint.Write(b, uint64(p.InitialSourceConnectionID.Len())) - b.Write(p.InitialSourceConnectionID.Bytes()) + b = quicvarint.Append(b, uint64(initialSourceConnectionIDParameterID)) + b = quicvarint.Append(b, uint64(p.InitialSourceConnectionID.Len())) + b = append(b, p.InitialSourceConnectionID.Bytes()...) // retry_source_connection_id if pers == protocol.PerspectiveServer && p.RetrySourceConnectionID != nil { - quicvarint.Write(b, uint64(retrySourceConnectionIDParameterID)) - quicvarint.Write(b, uint64(p.RetrySourceConnectionID.Len())) - b.Write(p.RetrySourceConnectionID.Bytes()) + b = quicvarint.Append(b, uint64(retrySourceConnectionIDParameterID)) + b = quicvarint.Append(b, uint64(p.RetrySourceConnectionID.Len())) + b = append(b, p.RetrySourceConnectionID.Bytes()...) } if p.MaxDatagramFrameSize != protocol.InvalidByteCount { - p.marshalVarintParam(b, maxDatagramFrameSizeParameterID, uint64(p.MaxDatagramFrameSize)) + b = p.marshalVarintParam(b, maxDatagramFrameSizeParameterID, uint64(p.MaxDatagramFrameSize)) } - return b.Bytes() + return b } // [Psiphon] @@ -410,65 +415,67 @@ func (p *TransportParameters) Marshal(pers protocol.Perspective, clientHelloPRNG // Marshal is retained as-is to ease future merging. func (p *TransportParameters) marshalClientRandomized(clientHelloPRNG *prng.PRNG) []byte { - b := &bytes.Buffer{} + // Typical Transport Parameters consume around 110 bytes, depending on the exact values, + // especially the lengths of the Connection IDs. + // Allocate 256 bytes, so we won't have to grow the slice in any case. 
+ b := make([]byte, 0, 256) marshallers := []func(){ func() { // add a greased value - quicvarint.Write(b, uint64(27+31*rand.Intn(100))) + b = quicvarint.Append(b, uint64(27+31*rand.Intn(100))) length := rand.Intn(16) - randomData := make([]byte, length) - rand.Read(randomData) - quicvarint.Write(b, uint64(length)) - b.Write(randomData) + b = quicvarint.Append(b, uint64(length)) + b = b[:len(b)+length] + rand.Read(b[len(b)-length:]) }, func() { // initial_max_stream_data_bidi_local - p.marshalVarintParam(b, initialMaxStreamDataBidiLocalParameterID, uint64(p.InitialMaxStreamDataBidiLocal)) + b = p.marshalVarintParam(b, initialMaxStreamDataBidiLocalParameterID, uint64(p.InitialMaxStreamDataBidiLocal)) }, func() { // initial_max_stream_data_bidi_remote - p.marshalVarintParam(b, initialMaxStreamDataBidiRemoteParameterID, uint64(p.InitialMaxStreamDataBidiRemote)) + b = p.marshalVarintParam(b, initialMaxStreamDataBidiRemoteParameterID, uint64(p.InitialMaxStreamDataBidiRemote)) }, func() { // initial_max_stream_data_uni - p.marshalVarintParam(b, initialMaxStreamDataUniParameterID, uint64(p.InitialMaxStreamDataUni)) + b = p.marshalVarintParam(b, initialMaxStreamDataUniParameterID, uint64(p.InitialMaxStreamDataUni)) }, func() { // initial_max_data - p.marshalVarintParam(b, initialMaxDataParameterID, uint64(p.InitialMaxData)) + b = p.marshalVarintParam(b, initialMaxDataParameterID, uint64(p.InitialMaxData)) }, func() { // initial_max_bidi_streams - p.marshalVarintParam(b, initialMaxStreamsBidiParameterID, uint64(p.MaxBidiStreamNum)) + b = p.marshalVarintParam(b, initialMaxStreamsBidiParameterID, uint64(p.MaxBidiStreamNum)) }, func() { // initial_max_uni_streams - p.marshalVarintParam(b, initialMaxStreamsUniParameterID, uint64(p.MaxUniStreamNum)) + b = p.marshalVarintParam(b, initialMaxStreamsUniParameterID, uint64(p.MaxUniStreamNum)) }, func() { // idle_timeout - p.marshalVarintParam(b, maxIdleTimeoutParameterID, uint64(p.MaxIdleTimeout/time.Millisecond)) + b = p.marshalVarintParam(b, maxIdleTimeoutParameterID, uint64(p.MaxIdleTimeout/time.Millisecond)) }, func() { // max_packet_size - p.marshalVarintParam(b, maxUDPPayloadSizeParameterID, uint64(protocol.MaxPacketBufferSize)) + b = p.marshalVarintParam(b, maxUDPPayloadSizeParameterID, uint64(protocol.MaxPacketBufferSize)) }, func() { // max_ack_delay // Only send it if is different from the default value. if p.MaxAckDelay != protocol.DefaultMaxAckDelay { - p.marshalVarintParam(b, maxAckDelayParameterID, uint64(p.MaxAckDelay/time.Millisecond)) + b = p.marshalVarintParam(b, maxAckDelayParameterID, uint64(p.MaxAckDelay/time.Millisecond)) } }, @@ -476,33 +483,33 @@ func (p *TransportParameters) marshalClientRandomized(clientHelloPRNG *prng.PRNG // ack_delay_exponent // Only send it if is different from the default value. 
if p.AckDelayExponent != protocol.DefaultAckDelayExponent { - p.marshalVarintParam(b, ackDelayExponentParameterID, uint64(p.AckDelayExponent)) + b = p.marshalVarintParam(b, ackDelayExponentParameterID, uint64(p.AckDelayExponent)) } }, func() { // disable_active_migration if p.DisableActiveMigration { - quicvarint.Write(b, uint64(disableActiveMigrationParameterID)) - quicvarint.Write(b, 0) + b = quicvarint.Append(b, uint64(disableActiveMigrationParameterID)) + b = quicvarint.Append(b, 0) } }, func() { // active_connection_id_limit - p.marshalVarintParam(b, activeConnectionIDLimitParameterID, p.ActiveConnectionIDLimit) + b = p.marshalVarintParam(b, activeConnectionIDLimitParameterID, p.ActiveConnectionIDLimit) }, func() { // initial_source_connection_id - quicvarint.Write(b, uint64(initialSourceConnectionIDParameterID)) - quicvarint.Write(b, uint64(p.InitialSourceConnectionID.Len())) - b.Write(p.InitialSourceConnectionID.Bytes()) + b = quicvarint.Append(b, uint64(initialSourceConnectionIDParameterID)) + b = quicvarint.Append(b, uint64(p.InitialSourceConnectionID.Len())) + b = append(b, p.InitialSourceConnectionID.Bytes()...) }, func() { if p.MaxDatagramFrameSize != protocol.InvalidByteCount { - p.marshalVarintParam(b, maxDatagramFrameSizeParameterID, uint64(p.MaxDatagramFrameSize)) + b = p.marshalVarintParam(b, maxDatagramFrameSizeParameterID, uint64(p.MaxDatagramFrameSize)) } }, } @@ -512,12 +519,13 @@ func (p *TransportParameters) marshalClientRandomized(clientHelloPRNG *prng.PRNG marshallers[j]() } - return b.Bytes() + return b } -func (p *TransportParameters) marshalVarintParam(b *bytes.Buffer, id transportParameterID, val uint64) { - quicvarint.Write(b, uint64(id)) - quicvarint.Write(b, uint64(quicvarint.Len(val))) - quicvarint.Write(b, val) + +func (p *TransportParameters) marshalVarintParam(b []byte, id transportParameterID, val uint64) []byte { + b = quicvarint.Append(b, uint64(id)) + b = quicvarint.Append(b, uint64(quicvarint.Len(val))) + return quicvarint.Append(b, val) } // MarshalForSessionTicket marshals the transport parameters we save in the session ticket. @@ -528,23 +536,23 @@ func (p *TransportParameters) marshalVarintParam(b *bytes.Buffer, id transportPa // if the transport parameters changed. // Since the session ticket is encrypted, the serialization format is defined by the server. // For convenience, we use the same format that we also use for sending the transport parameters. 
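// Worked example, not from the patch: marshalVarintParam encodes each
// parameter as (varint ID, varint length-of-value, varint value). For
// max_idle_timeout (ID 0x01, RFC 9000 Section 18.2) of 30000 ms:
//
//	p.marshalVarintParam(b, maxIdleTimeoutParameterID, 30000)
//
// appends 6 bytes: 0x01 (ID), 0x04 (length; 30000 needs a 4-byte varint),
// then 0x80 0x00 0x75 0x30 (30000 with the 0b10 four-byte-varint prefix).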
-func (p *TransportParameters) MarshalForSessionTicket(b *bytes.Buffer) { - quicvarint.Write(b, transportParameterMarshalingVersion) +func (p *TransportParameters) MarshalForSessionTicket(b []byte) []byte { + b = quicvarint.Append(b, transportParameterMarshalingVersion) // initial_max_stream_data_bidi_local - p.marshalVarintParam(b, initialMaxStreamDataBidiLocalParameterID, uint64(p.InitialMaxStreamDataBidiLocal)) + b = p.marshalVarintParam(b, initialMaxStreamDataBidiLocalParameterID, uint64(p.InitialMaxStreamDataBidiLocal)) // initial_max_stream_data_bidi_remote - p.marshalVarintParam(b, initialMaxStreamDataBidiRemoteParameterID, uint64(p.InitialMaxStreamDataBidiRemote)) + b = p.marshalVarintParam(b, initialMaxStreamDataBidiRemoteParameterID, uint64(p.InitialMaxStreamDataBidiRemote)) // initial_max_stream_data_uni - p.marshalVarintParam(b, initialMaxStreamDataUniParameterID, uint64(p.InitialMaxStreamDataUni)) + b = p.marshalVarintParam(b, initialMaxStreamDataUniParameterID, uint64(p.InitialMaxStreamDataUni)) // initial_max_data - p.marshalVarintParam(b, initialMaxDataParameterID, uint64(p.InitialMaxData)) + b = p.marshalVarintParam(b, initialMaxDataParameterID, uint64(p.InitialMaxData)) // initial_max_bidi_streams - p.marshalVarintParam(b, initialMaxStreamsBidiParameterID, uint64(p.MaxBidiStreamNum)) + b = p.marshalVarintParam(b, initialMaxStreamsBidiParameterID, uint64(p.MaxBidiStreamNum)) // initial_max_uni_streams - p.marshalVarintParam(b, initialMaxStreamsUniParameterID, uint64(p.MaxUniStreamNum)) + b = p.marshalVarintParam(b, initialMaxStreamsUniParameterID, uint64(p.MaxUniStreamNum)) // active_connection_id_limit - p.marshalVarintParam(b, activeConnectionIDLimitParameterID, p.ActiveConnectionIDLimit) + return p.marshalVarintParam(b, activeConnectionIDLimitParameterID, p.ActiveConnectionIDLimit) } // UnmarshalFromSessionTicket unmarshals transport parameters from a session ticket. diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/version_negotiation.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/version_negotiation.go index d3614e8a2..71ee435bf 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/version_negotiation.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/version_negotiation.go @@ -3,6 +3,7 @@ package wire import ( "bytes" "crypto/rand" + "encoding/binary" "errors" "github.com/Psiphon-Labs/quic-go/internal/protocol" @@ -10,32 +11,30 @@ import ( ) // ParseVersionNegotiationPacket parses a Version Negotiation packet. 
-func ParseVersionNegotiationPacket(b *bytes.Reader) (*Header, []protocol.VersionNumber, error) { - hdr, err := parseHeader(b, 0) +func ParseVersionNegotiationPacket(b []byte) (dest, src protocol.ArbitraryLenConnectionID, _ []protocol.VersionNumber, _ error) { + n, dest, src, err := ParseArbitraryLenConnectionIDs(b) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - if b.Len() == 0 { + b = b[n:] + if len(b) == 0 { //nolint:stylecheck - return nil, nil, errors.New("Version Negotiation packet has empty version list") + return nil, nil, nil, errors.New("Version Negotiation packet has empty version list") } - if b.Len()%4 != 0 { + if len(b)%4 != 0 { //nolint:stylecheck - return nil, nil, errors.New("Version Negotiation packet has a version list with an invalid length") + return nil, nil, nil, errors.New("Version Negotiation packet has a version list with an invalid length") } - versions := make([]protocol.VersionNumber, b.Len()/4) - for i := 0; b.Len() > 0; i++ { - v, err := utils.BigEndian.ReadUint32(b) - if err != nil { - return nil, nil, err - } - versions[i] = protocol.VersionNumber(v) + versions := make([]protocol.VersionNumber, len(b)/4) + for i := 0; len(b) > 0; i++ { + versions[i] = protocol.VersionNumber(binary.BigEndian.Uint32(b[:4])) + b = b[4:] } - return hdr, versions, nil + return dest, src, versions, nil } // ComposeVersionNegotiation composes a Version Negotiation -func ComposeVersionNegotiation(destConnID, srcConnID protocol.ConnectionID, versions []protocol.VersionNumber) []byte { +func ComposeVersionNegotiation(destConnID, srcConnID protocol.ArbitraryLenConnectionID, versions []protocol.VersionNumber) []byte { greasedVersions := protocol.GetGreasedVersions(versions) expectedLen := 1 /* type byte */ + 4 /* version field */ + 1 /* dest connection ID length field */ + destConnID.Len() + 1 /* src connection ID length field */ + srcConnID.Len() + len(greasedVersions)*4 buf := bytes.NewBuffer(make([]byte, 0, expectedLen)) @@ -44,9 +43,9 @@ func ComposeVersionNegotiation(destConnID, srcConnID protocol.ConnectionID, vers buf.WriteByte(r[0] | 0x80) utils.BigEndian.WriteUint32(buf, 0) // version 0 buf.WriteByte(uint8(destConnID.Len())) - buf.Write(destConnID) + buf.Write(destConnID.Bytes()) buf.WriteByte(uint8(srcConnID.Len())) - buf.Write(srcConnID) + buf.Write(srcConnID.Bytes()) for _, v := range greasedVersions { utils.BigEndian.WriteUint32(buf, uint32(v)) } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/logging/interface.go b/vendor/github.com/Psiphon-Labs/quic-go/logging/interface.go index cce63d825..e2ad1592f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/logging/interface.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/logging/interface.go @@ -19,6 +19,8 @@ type ( ByteCount = protocol.ByteCount // A ConnectionID is a QUIC Connection ID. ConnectionID = protocol.ConnectionID + // An ArbitraryLenConnectionID is a QUIC Connection ID that can be up to 255 bytes long. + ArbitraryLenConnectionID = protocol.ArbitraryLenConnectionID // The EncryptionLevel is the encryption level of a packet. EncryptionLevel = protocol.EncryptionLevel // The KeyPhase is the key phase of the 1-RTT keys. @@ -42,7 +44,7 @@ type ( // The Header is the QUIC packet header, before removing header protection. Header = wire.Header - // The ExtendedHeader is the QUIC packet header, after removing header protection. + // The ExtendedHeader is the QUIC Long Header packet header, after removing header protection. 
ExtendedHeader = wire.ExtendedHeader // The TransportParameters are QUIC transport parameters. TransportParameters = wire.TransportParameters @@ -90,6 +92,14 @@ const ( StreamTypeBidi = protocol.StreamTypeBidi ) +// The ShortHeader is the QUIC Short Header packet header, after removing header protection. +type ShortHeader struct { + DestConnectionID ConnectionID + PacketNumber PacketNumber + PacketNumberLen protocol.PacketNumberLen + KeyPhase KeyPhaseBit +} + // A Tracer traces events. type Tracer interface { // TracerForConnection requests a new tracer for a connection. @@ -99,6 +109,7 @@ type Tracer interface { TracerForConnection(ctx context.Context, p Perspective, odcid ConnectionID) ConnectionTracer SentPacket(net.Addr, *Header, ByteCount, []Frame) + SentVersionNegotiationPacket(_ net.Addr, dest, src ArbitraryLenConnectionID, _ []VersionNumber) DroppedPacket(net.Addr, PacketType, ByteCount, PacketDropReason) } @@ -111,10 +122,11 @@ type ConnectionTracer interface { ReceivedTransportParameters(*TransportParameters) RestoredTransportParameters(parameters *TransportParameters) // for 0-RTT SentPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) - ReceivedVersionNegotiationPacket(*Header, []VersionNumber) + ReceivedVersionNegotiationPacket(dest, src ArbitraryLenConnectionID, _ []VersionNumber) ReceivedRetry(*Header) - ReceivedPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame) - BufferedPacket(PacketType) + ReceivedLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame) + ReceivedShortHeaderPacket(hdr *ShortHeader, size ByteCount, frames []Frame) + BufferedPacket(PacketType, ByteCount) DroppedPacket(PacketType, ByteCount, PacketDropReason) UpdatedMetrics(rttStats *RTTStats, cwnd, bytesInFlight ByteCount, packetsInFlight int) AcknowledgedPacket(EncryptionLevel, PacketNumber) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/logging/mockgen.go b/vendor/github.com/Psiphon-Labs/quic-go/logging/mockgen.go index 76a43191e..f6cb334dc 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/logging/mockgen.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/logging/mockgen.go @@ -1,4 +1,4 @@ package logging -//go:generate sh -c "mockgen -package logging -self_package github.com/Psiphon-Labs/quic-go/logging -destination mock_connection_tracer_test.go github.com/Psiphon-Labs/quic-go/logging ConnectionTracer" -//go:generate sh -c "mockgen -package logging -self_package github.com/Psiphon-Labs/quic-go/logging -destination mock_tracer_test.go github.com/Psiphon-Labs/quic-go/logging Tracer" +//go:generate sh -c "go run github.com/golang/mock/mockgen -package logging -self_package github.com/Psiphon-Labs/quic-go/logging -destination mock_connection_tracer_test.go github.com/Psiphon-Labs/quic-go/logging ConnectionTracer" +//go:generate sh -c "go run github.com/golang/mock/mockgen -package logging -self_package github.com/Psiphon-Labs/quic-go/logging -destination mock_tracer_test.go github.com/Psiphon-Labs/quic-go/logging Tracer" diff --git a/vendor/github.com/Psiphon-Labs/quic-go/logging/multiplex.go b/vendor/github.com/Psiphon-Labs/quic-go/logging/multiplex.go index 8280e8cdf..d7166f1aa 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/logging/multiplex.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/logging/multiplex.go @@ -39,6 +39,12 @@ func (m *tracerMultiplexer) SentPacket(remote net.Addr, hdr *Header, size ByteCo } } +func (m *tracerMultiplexer) SentVersionNegotiationPacket(remote net.Addr, dest, src ArbitraryLenConnectionID, versions []VersionNumber) { + 
for _, t := range m.tracers { + t.SentVersionNegotiationPacket(remote, dest, src, versions) + } +} + func (m *tracerMultiplexer) DroppedPacket(remote net.Addr, typ PacketType, size ByteCount, reason PacketDropReason) { for _, t := range m.tracers { t.DroppedPacket(remote, typ, size, reason) @@ -104,9 +110,9 @@ func (m *connTracerMultiplexer) SentPacket(hdr *ExtendedHeader, size ByteCount, } } -func (m *connTracerMultiplexer) ReceivedVersionNegotiationPacket(hdr *Header, versions []VersionNumber) { +func (m *connTracerMultiplexer) ReceivedVersionNegotiationPacket(dest, src ArbitraryLenConnectionID, versions []VersionNumber) { for _, t := range m.tracers { - t.ReceivedVersionNegotiationPacket(hdr, versions) + t.ReceivedVersionNegotiationPacket(dest, src, versions) } } @@ -116,15 +122,21 @@ func (m *connTracerMultiplexer) ReceivedRetry(hdr *Header) { } } -func (m *connTracerMultiplexer) ReceivedPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame) { +func (m *connTracerMultiplexer) ReceivedLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame) { + for _, t := range m.tracers { + t.ReceivedLongHeaderPacket(hdr, size, frames) + } +} + +func (m *connTracerMultiplexer) ReceivedShortHeaderPacket(hdr *ShortHeader, size ByteCount, frames []Frame) { for _, t := range m.tracers { - t.ReceivedPacket(hdr, size, frames) + t.ReceivedShortHeaderPacket(hdr, size, frames) } } -func (m *connTracerMultiplexer) BufferedPacket(typ PacketType) { +func (m *connTracerMultiplexer) BufferedPacket(typ PacketType, size ByteCount) { for _, t := range m.tracers { - t.BufferedPacket(typ) + t.BufferedPacket(typ, size) } } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/logging/null_tracer.go b/vendor/github.com/Psiphon-Labs/quic-go/logging/null_tracer.go index 4e0bb60ba..3103ae90a 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/logging/null_tracer.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/logging/null_tracer.go @@ -10,31 +10,40 @@ import ( // It is useful for embedding. type NullTracer struct{} +var _ Tracer = &NullTracer{} + func (n NullTracer) TracerForConnection(context.Context, Perspective, ConnectionID) ConnectionTracer { return NullConnectionTracer{} } -func (n NullTracer) SentPacket(net.Addr, *Header, ByteCount, []Frame) {} +func (n NullTracer) SentPacket(net.Addr, *Header, ByteCount, []Frame) {} +func (n NullTracer) SentVersionNegotiationPacket(_ net.Addr, dest, src ArbitraryLenConnectionID, _ []VersionNumber) { +} func (n NullTracer) DroppedPacket(net.Addr, PacketType, ByteCount, PacketDropReason) {} // The NullConnectionTracer is a ConnectionTracer that does nothing. // It is useful for embedding. 
type NullConnectionTracer struct{} +var _ ConnectionTracer = &NullConnectionTracer{} + func (n NullConnectionTracer) StartedConnection(local, remote net.Addr, srcConnID, destConnID ConnectionID) { } func (n NullConnectionTracer) NegotiatedVersion(chosen VersionNumber, clientVersions, serverVersions []VersionNumber) { } -func (n NullConnectionTracer) ClosedConnection(err error) {} -func (n NullConnectionTracer) SentTransportParameters(*TransportParameters) {} -func (n NullConnectionTracer) ReceivedTransportParameters(*TransportParameters) {} -func (n NullConnectionTracer) RestoredTransportParameters(*TransportParameters) {} -func (n NullConnectionTracer) SentPacket(*ExtendedHeader, ByteCount, *AckFrame, []Frame) {} -func (n NullConnectionTracer) ReceivedVersionNegotiationPacket(*Header, []VersionNumber) {} -func (n NullConnectionTracer) ReceivedRetry(*Header) {} -func (n NullConnectionTracer) ReceivedPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame) {} -func (n NullConnectionTracer) BufferedPacket(PacketType) {} -func (n NullConnectionTracer) DroppedPacket(PacketType, ByteCount, PacketDropReason) {} +func (n NullConnectionTracer) ClosedConnection(err error) {} +func (n NullConnectionTracer) SentTransportParameters(*TransportParameters) {} +func (n NullConnectionTracer) ReceivedTransportParameters(*TransportParameters) {} +func (n NullConnectionTracer) RestoredTransportParameters(*TransportParameters) {} +func (n NullConnectionTracer) SentPacket(*ExtendedHeader, ByteCount, *AckFrame, []Frame) {} +func (n NullConnectionTracer) ReceivedVersionNegotiationPacket(dest, src ArbitraryLenConnectionID, _ []VersionNumber) { +} +func (n NullConnectionTracer) ReceivedRetry(*Header) {} +func (n NullConnectionTracer) ReceivedLongHeaderPacket(*ExtendedHeader, ByteCount, []Frame) {} +func (n NullConnectionTracer) ReceivedShortHeaderPacket(*ShortHeader, ByteCount, []Frame) {} +func (n NullConnectionTracer) BufferedPacket(PacketType, ByteCount) {} +func (n NullConnectionTracer) DroppedPacket(PacketType, ByteCount, PacketDropReason) {} + func (n NullConnectionTracer) UpdatedMetrics(rttStats *RTTStats, cwnd, bytesInFlight ByteCount, packetsInFlight int) { } func (n NullConnectionTracer) AcknowledgedPacket(EncryptionLevel, PacketNumber) {} diff --git a/vendor/github.com/Psiphon-Labs/quic-go/mockgen.go b/vendor/github.com/Psiphon-Labs/quic-go/mockgen.go index c83a23fbf..489df452c 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/mockgen.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/mockgen.go @@ -23,5 +23,5 @@ package quic //go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_manager_test.go github.com/Psiphon-Labs/quic-go packetHandlerManager" //go:generate sh -c "./mockgen_private.sh quic mock_multiplexer_test.go github.com/Psiphon-Labs/quic-go multiplexer" //go:generate sh -c "./mockgen_private.sh quic mock_batch_conn_test.go github.com/Psiphon-Labs/quic-go batchConn" -//go:generate sh -c "mockgen -package quic -self_package github.com/Psiphon-Labs/quic-go -destination mock_token_store_test.go github.com/Psiphon-Labs/quic-go TokenStore" -//go:generate sh -c "mockgen -package quic -self_package github.com/Psiphon-Labs/quic-go -destination mock_packetconn_test.go net PacketConn" +//go:generate sh -c "go run github.com/golang/mock/mockgen -package quic -self_package github.com/Psiphon-Labs/quic-go -destination mock_token_store_test.go github.com/Psiphon-Labs/quic-go TokenStore" +//go:generate sh -c "go run github.com/golang/mock/mockgen -package quic -self_package 
github.com/Psiphon-Labs/quic-go -destination mock_packetconn_test.go net PacketConn" diff --git a/vendor/github.com/Psiphon-Labs/quic-go/mockgen_private.sh b/vendor/github.com/Psiphon-Labs/quic-go/mockgen_private.sh index 27829175f..ba3cdb35c 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/mockgen_private.sh +++ b/vendor/github.com/Psiphon-Labs/quic-go/mockgen_private.sh @@ -44,6 +44,6 @@ AUX_FILES=$(IFS=, ; echo "${AUX[*]}") ## create a public alias for the interface, so that mockgen can process it echo -e "package $1\n" > $TMPFILE echo "$INTERFACE" | sed "s/$ORIG_INTERFACE_NAME/$INTERFACE_NAME/" >> $TMPFILE -mockgen -package $1 -self_package $3 -destination $DEST -source=$TMPFILE -aux_files $AUX_FILES +go run github.com/golang/mock/mockgen -package $1 -self_package $3 -destination $DEST -source=$TMPFILE -aux_files $AUX_FILES sed "s/$TMPFILE/$SRC/" "$DEST" > "$DEST.new" && mv "$DEST.new" "$DEST" rm "$TMPFILE" diff --git a/vendor/github.com/Psiphon-Labs/quic-go/multiplexer.go b/vendor/github.com/Psiphon-Labs/quic-go/multiplexer.go index 76d82b52b..2c1747b1f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/multiplexer.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/multiplexer.go @@ -1,7 +1,6 @@ package quic import ( - "bytes" "fmt" "net" "sync" @@ -20,13 +19,13 @@ type indexableConn interface { } type multiplexer interface { - AddConn(c net.PacketConn, connIDLen int, statelessResetKey []byte, tracer logging.Tracer) (packetHandlerManager, error) + AddConn(c net.PacketConn, connIDLen int, statelessResetKey *StatelessResetKey, tracer logging.Tracer) (packetHandlerManager, error) RemoveConn(indexableConn) error } type connManager struct { connIDLen int - statelessResetKey []byte + statelessResetKey *StatelessResetKey tracer logging.Tracer manager packetHandlerManager } @@ -37,7 +36,7 @@ type connMultiplexer struct { mutex sync.Mutex conns map[string] /* LocalAddr().String() */ connManager - newPacketHandlerManager func(net.PacketConn, int, []byte, logging.Tracer, utils.Logger) (packetHandlerManager, error) // so it can be replaced in the tests + newPacketHandlerManager func(net.PacketConn, int, *StatelessResetKey, logging.Tracer, utils.Logger) (packetHandlerManager, error) // so it can be replaced in the tests logger utils.Logger } @@ -58,7 +57,7 @@ func getMultiplexer() multiplexer { func (m *connMultiplexer) AddConn( c net.PacketConn, connIDLen int, - statelessResetKey []byte, + statelessResetKey *StatelessResetKey, tracer logging.Tracer, ) (packetHandlerManager, error) { m.mutex.Lock() @@ -83,7 +82,7 @@ func (m *connMultiplexer) AddConn( if p.connIDLen != connIDLen { return nil, fmt.Errorf("cannot use %d byte connection IDs on a connection that is already using %d byte connction IDs", connIDLen, p.connIDLen) } - if statelessResetKey != nil && !bytes.Equal(p.statelessResetKey, statelessResetKey) { + if statelessResetKey != nil && p.statelessResetKey != statelessResetKey { return nil, fmt.Errorf("cannot use different stateless reset keys on the same packet conn") } if tracer != p.tracer { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/packet_handler_map.go b/vendor/github.com/Psiphon-Labs/quic-go/packet_handler_map.go index 05698eecd..dec95fa8f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/packet_handler_map.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/packet_handler_map.go @@ -47,7 +47,7 @@ type packetHandlerMap struct { closeQueue chan closePacket - handlers map[string] /* string(ConnectionID)*/ packetHandler + handlers map[protocol.ConnectionID]packetHandler 
resetTokens map[protocol.StatelessResetToken] /* stateless reset token */ packetHandler server unknownPacketHandler numZeroRTTEntries int @@ -104,7 +104,7 @@ var receiveBufferWarningOnce sync.Once func newPacketHandlerMap( c net.PacketConn, connIDLen int, - statelessResetKey []byte, + statelessResetKey *StatelessResetKey, tracer logging.Tracer, logger utils.Logger, ) (packetHandlerManager, error) { @@ -129,16 +129,18 @@ func newPacketHandlerMap( conn: conn, connIDLen: connIDLen, listening: make(chan struct{}), - handlers: make(map[string]packetHandler), + handlers: make(map[protocol.ConnectionID]packetHandler), resetTokens: make(map[protocol.StatelessResetToken]packetHandler), deleteRetiredConnsAfter: protocol.RetiredConnectionIDDeleteTimeout, zeroRTTQueueDuration: protocol.Max0RTTQueueingDuration, closeQueue: make(chan closePacket, 4), - statelessResetEnabled: len(statelessResetKey) > 0, - statelessResetHasher: hmac.New(sha256.New, statelessResetKey), + statelessResetEnabled: statelessResetKey != nil, tracer: tracer, logger: logger, } + if m.statelessResetEnabled { + m.statelessResetHasher = hmac.New(sha256.New, statelessResetKey[:]) + } go m.listen() go m.runCloseQueue() @@ -178,11 +180,11 @@ func (h *packetHandlerMap) Add(id protocol.ConnectionID, handler packetHandler) h.mutex.Lock() defer h.mutex.Unlock() - if _, ok := h.handlers[string(id)]; ok { + if _, ok := h.handlers[id]; ok { h.logger.Debugf("Not adding connection ID %s, as it already exists.", id) return false } - h.handlers[string(id)] = handler + h.handlers[id] = handler h.logger.Debugf("Adding connection ID %s.", id) return true } @@ -192,7 +194,7 @@ func (h *packetHandlerMap) AddWithConnID(clientDestConnID, newConnID protocol.Co defer h.mutex.Unlock() var q *zeroRTTQueue - if handler, ok := h.handlers[string(clientDestConnID)]; ok { + if handler, ok := h.handlers[clientDestConnID]; ok { q, ok = handler.(*zeroRTTQueue) if !ok { h.logger.Debugf("Not adding connection ID %s for a new connection, as it already exists.", clientDestConnID) @@ -208,15 +210,15 @@ func (h *packetHandlerMap) AddWithConnID(clientDestConnID, newConnID protocol.Co if q != nil { q.EnqueueAll(conn) } - h.handlers[string(clientDestConnID)] = conn - h.handlers[string(newConnID)] = conn + h.handlers[clientDestConnID] = conn + h.handlers[newConnID] = conn h.logger.Debugf("Adding connection IDs %s and %s for a new connection.", clientDestConnID, newConnID) return true } func (h *packetHandlerMap) Remove(id protocol.ConnectionID) { h.mutex.Lock() - delete(h.handlers, string(id)) + delete(h.handlers, id) h.mutex.Unlock() h.logger.Debugf("Removing connection ID %s.", id) } @@ -225,7 +227,7 @@ func (h *packetHandlerMap) Retire(id protocol.ConnectionID) { h.logger.Debugf("Retiring connection ID %s in %s.", id, h.deleteRetiredConnsAfter) time.AfterFunc(h.deleteRetiredConnsAfter, func() { h.mutex.Lock() - delete(h.handlers, string(id)) + delete(h.handlers, id) h.mutex.Unlock() h.logger.Debugf("Removing connection ID %s after it has been retired.", id) }) @@ -256,7 +258,7 @@ func (h *packetHandlerMap) ReplaceWithClosed(ids []protocol.ConnectionID, pers p h.mutex.Lock() for _, id := range ids { - h.handlers[string(id)] = handler + h.handlers[id] = handler } h.mutex.Unlock() h.logger.Debugf("Replacing connection for connection IDs %s with a closed connection.", ids) @@ -265,7 +267,7 @@ func (h *packetHandlerMap) ReplaceWithClosed(ids []protocol.ConnectionID, pers p h.mutex.Lock() handler.shutdown() for _, id := range ids { - delete(h.handlers, string(id)) + 
delete(h.handlers, id) } h.mutex.Unlock() h.logger.Debugf("Removing connection IDs %s for a closed connection after it has been retired.", ids) @@ -396,7 +398,7 @@ func (h *packetHandlerMap) handlePacket(p *receivedPacket) { return } - if handler, ok := h.handlers[string(connID)]; ok { + if handler, ok := h.handlers[connID]; ok { if ha, ok := handler.(*zeroRTTQueue); ok { // only enqueue 0-RTT packets in the 0-RTT queue if wire.Is0RTTPacket(p.data) { ha.handlePacket(p) @@ -421,15 +423,15 @@ func (h *packetHandlerMap) handlePacket(p *receivedPacket) { } h.numZeroRTTEntries++ queue := &zeroRTTQueue{queue: make([]*receivedPacket, 0, 8)} - h.handlers[string(connID)] = queue + h.handlers[connID] = queue queue.retireTimer = time.AfterFunc(h.zeroRTTQueueDuration, func() { h.mutex.Lock() defer h.mutex.Unlock() // The entry might have been replaced by an actual connection. // Only delete it if it's still a 0-RTT queue. - if handler, ok := h.handlers[string(connID)]; ok { + if handler, ok := h.handlers[connID]; ok { if q, ok := handler.(*zeroRTTQueue); ok { - delete(h.handlers, string(connID)) + delete(h.handlers, connID) h.numZeroRTTEntries-- if h.numZeroRTTEntries < 0 { panic("number of 0-RTT queues < 0") diff --git a/vendor/github.com/Psiphon-Labs/quic-go/packet_packer.go b/vendor/github.com/Psiphon-Labs/quic-go/packet_packer.go index 21dba3b52..0d24c0a11 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/packet_packer.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/packet_packer.go @@ -16,10 +16,9 @@ import ( ) type packer interface { - PackCoalescedPacket() (*coalescedPacket, error) - PackPacket() (*packedPacket, error) + PackCoalescedPacket(onlyAck bool) (*coalescedPacket, error) + PackPacket(onlyAck bool) (*packedPacket, error) MaybePackProbePacket(protocol.EncryptionLevel) (*packedPacket, error) - MaybePackAckPacket(handshakeConfirmed bool) (*packedPacket, error) PackConnectionClose(*qerr.TransportError) (*coalescedPacket, error) PackApplicationClose(*qerr.ApplicationError) (*coalescedPacket, error) @@ -100,15 +99,16 @@ func (p *packetContents) ToAckHandlerPacket(now time.Time, q *retransmissionQueu p.frames[i].OnLost = q.AddAppData } } - return &ackhandler.Packet{ - PacketNumber: p.header.PacketNumber, - LargestAcked: largestAcked, - Frames: p.frames, - Length: p.length, - EncryptionLevel: encLevel, - SendTime: now, - IsPathMTUProbePacket: p.isMTUProbePacket, - } + + ap := ackhandler.GetPacket() + ap.PacketNumber = p.header.PacketNumber + ap.LargestAcked = largestAcked + ap.Frames = p.frames + ap.Length = p.length + ap.EncryptionLevel = encLevel + ap.SendTime = now + ap.IsPathMTUProbePacket = p.isMTUProbePacket + return ap } func getMaxPacketSize(addr net.Addr, maxPacketSizeAdustment int) protocol.ByteCount { @@ -339,39 +339,6 @@ func (p *packetPacker) packetLength(hdr *wire.ExtendedHeader, payload *payload) return hdr.GetLength(p.version) + payload.length + paddingLen } -func (p *packetPacker) MaybePackAckPacket(handshakeConfirmed bool) (*packedPacket, error) { - var encLevel protocol.EncryptionLevel - var ack *wire.AckFrame - if !handshakeConfirmed { - ack = p.acks.GetAckFrame(protocol.EncryptionInitial, true) - if ack != nil { - encLevel = protocol.EncryptionInitial - } else { - ack = p.acks.GetAckFrame(protocol.EncryptionHandshake, true) - if ack != nil { - encLevel = protocol.EncryptionHandshake - } - } - } - if ack == nil { - ack = p.acks.GetAckFrame(protocol.Encryption1RTT, true) - if ack == nil { - return nil, nil - } - encLevel = protocol.Encryption1RTT - } - payload := &payload{ 
- ack: ack, - length: ack.Length(p.version), - } - - sealer, hdr, err := p.getSealerAndHeader(encLevel) - if err != nil { - return nil, err - } - return p.writeSinglePacket(hdr, payload, encLevel, sealer) -} - // size is the expected size of the packet, if no padding was applied. func (p *packetPacker) initialPaddingLen(frames []ackhandler.Frame, size protocol.ByteCount) protocol.ByteCount { // For the server, only ack-eliciting Initial packets need to be padded. @@ -387,7 +354,7 @@ func (p *packetPacker) initialPaddingLen(frames []ackhandler.Frame, size protoco // PackCoalescedPacket packs a new packet. // It packs an Initial / Handshake if there is data to send in these packet number spaces. // It should only be called before the handshake is confirmed. -func (p *packetPacker) PackCoalescedPacket() (*coalescedPacket, error) { +func (p *packetPacker) PackCoalescedPacket(onlyAck bool) (*coalescedPacket, error) { maxPacketSize := p.maxPacketSize if p.perspective == protocol.PerspectiveClient { maxPacketSize = protocol.MinInitialPacketSize @@ -402,7 +369,7 @@ func (p *packetPacker) PackCoalescedPacket() (*coalescedPacket, error) { } var size protocol.ByteCount if initialSealer != nil { - initialHdr, initialPayload = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(initialSealer.Overhead()), size, protocol.EncryptionInitial) + initialHdr, initialPayload = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(initialSealer.Overhead()), protocol.EncryptionInitial, onlyAck, true) if initialPayload != nil { size += p.packetLength(initialHdr, initialPayload) + protocol.ByteCount(initialSealer.Overhead()) numPackets++ @@ -411,14 +378,14 @@ func (p *packetPacker) PackCoalescedPacket() (*coalescedPacket, error) { // Add a Handshake packet. var handshakeSealer sealer - if size < maxPacketSize-protocol.MinCoalescedPacketSize { + if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { var err error handshakeSealer, err = p.cryptoSetup.GetHandshakeSealer() if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { return nil, err } if handshakeSealer != nil { - handshakeHdr, handshakePayload = p.maybeGetCryptoPacket(maxPacketSize-size-protocol.ByteCount(handshakeSealer.Overhead()), size, protocol.EncryptionHandshake) + handshakeHdr, handshakePayload = p.maybeGetCryptoPacket(maxPacketSize-size-protocol.ByteCount(handshakeSealer.Overhead()), protocol.EncryptionHandshake, onlyAck, size == 0) if handshakePayload != nil { s := p.packetLength(handshakeHdr, handshakePayload) + protocol.ByteCount(handshakeSealer.Overhead()) size += s @@ -430,17 +397,24 @@ func (p *packetPacker) PackCoalescedPacket() (*coalescedPacket, error) { // Add a 0-RTT / 1-RTT packet. 
var appDataSealer sealer appDataEncLevel := protocol.Encryption1RTT - if size < maxPacketSize-protocol.MinCoalescedPacketSize { - var err error - appDataSealer, appDataHdr, appDataPayload = p.maybeGetAppDataPacket(maxPacketSize-size, size) - if err != nil { - return nil, err - } - if appDataHdr != nil { - if appDataHdr.IsLongHeader { - appDataEncLevel = protocol.Encryption0RTT + if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { + var sErr error + var oneRTTSealer handshake.ShortHeaderSealer + oneRTTSealer, sErr = p.cryptoSetup.Get1RTTSealer() + appDataSealer = oneRTTSealer + if sErr != nil && p.perspective == protocol.PerspectiveClient { + appDataSealer, sErr = p.cryptoSetup.Get0RTTSealer() + appDataEncLevel = protocol.Encryption0RTT + } + if appDataSealer != nil && sErr == nil { + //nolint:exhaustive // 0-RTT and 1-RTT are the only two application data encryption levels. + switch appDataEncLevel { + case protocol.Encryption0RTT: + appDataHdr, appDataPayload = p.maybeGetAppDataPacketFor0RTT(appDataSealer, maxPacketSize-size) + case protocol.Encryption1RTT: + appDataHdr, appDataPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, maxPacketSize-size, onlyAck, size == 0) } - if appDataPayload != nil { + if appDataHdr != nil && appDataPayload != nil { size += p.packetLength(appDataHdr, appDataPayload) + protocol.ByteCount(appDataSealer.Overhead()) numPackets++ } @@ -483,17 +457,17 @@ func (p *packetPacker) PackCoalescedPacket() (*coalescedPacket, error) { // PackPacket packs a packet in the application data packet number space. // It should be called after the handshake is confirmed. -func (p *packetPacker) PackPacket() (*packedPacket, error) { - sealer, hdr, payload := p.maybeGetAppDataPacket(p.maxPacketSize, 0) +func (p *packetPacker) PackPacket(onlyAck bool) (*packedPacket, error) { + sealer, err := p.cryptoSetup.Get1RTTSealer() + if err != nil { + return nil, err + } + hdr, payload := p.maybeGetShortHeaderPacket(sealer, p.maxPacketSize, onlyAck, true) if payload == nil { return nil, nil } buffer := getPacketBuffer() - encLevel := protocol.Encryption1RTT - if hdr.IsLongHeader { - encLevel = protocol.Encryption0RTT - } - cont, err := p.appendPacket(buffer, hdr, payload, 0, encLevel, sealer, false) + cont, err := p.appendPacket(buffer, hdr, payload, 0, protocol.Encryption1RTT, sealer, false) if err != nil { return nil, err } @@ -503,7 +477,17 @@ func (p *packetPacker) PackPacket() (*packedPacket, error) { }, nil } -func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize, currentSize protocol.ByteCount, encLevel protocol.EncryptionLevel) (*wire.ExtendedHeader, *payload) { +func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, encLevel protocol.EncryptionLevel, onlyAck, ackAllowed bool) (*wire.ExtendedHeader, *payload) { + if onlyAck { + if ack := p.acks.GetAckFrame(encLevel, true); ack != nil { + var payload payload + payload.ack = ack + payload.length = ack.Length(p.version) + return p.getLongHeader(encLevel), &payload + } + return nil, nil + } + var s cryptoStream var hasRetransmission bool //nolint:exhaustive // Initial and Handshake are the only two encryption levels here. @@ -518,7 +502,7 @@ func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize, currentSize protocol. 
hasData := s.HasData() var ack *wire.AckFrame - if encLevel == protocol.EncryptionInitial || currentSize == 0 { + if ackAllowed { ack = p.acks.GetAckFrame(encLevel, !hasRetransmission && !hasData) } if !hasData && !hasRetransmission && ack == nil { @@ -560,35 +544,26 @@ func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize, currentSize protocol. return hdr, &payload } -func (p *packetPacker) maybeGetAppDataPacket(maxPacketSize, currentSize protocol.ByteCount) (sealer, *wire.ExtendedHeader, *payload) { - var sealer sealer - var encLevel protocol.EncryptionLevel - var hdr *wire.ExtendedHeader - oneRTTSealer, err := p.cryptoSetup.Get1RTTSealer() - if err == nil { - encLevel = protocol.Encryption1RTT - sealer = oneRTTSealer - hdr = p.getShortHeader(oneRTTSealer.KeyPhase()) - } else { - // 1-RTT sealer not yet available - if p.perspective != protocol.PerspectiveClient { - return nil, nil, nil - } - sealer, err = p.cryptoSetup.Get0RTTSealer() - if sealer == nil || err != nil { - return nil, nil, nil - } - encLevel = protocol.Encryption0RTT - hdr = p.getLongHeader(protocol.Encryption0RTT) +func (p *packetPacker) maybeGetAppDataPacketFor0RTT(sealer sealer, maxPacketSize protocol.ByteCount) (*wire.ExtendedHeader, *payload) { + if p.perspective != protocol.PerspectiveClient { + return nil, nil } + hdr := p.getLongHeader(protocol.Encryption0RTT) maxPayloadSize := maxPacketSize - hdr.GetLength(p.version) - protocol.ByteCount(sealer.Overhead()) - payload := p.maybeGetAppDataPacketWithEncLevel(maxPayloadSize, encLevel == protocol.Encryption1RTT && currentSize == 0) - return sealer, hdr, payload + payload := p.maybeGetAppDataPacket(maxPayloadSize, false, false) + return hdr, payload } -func (p *packetPacker) maybeGetAppDataPacketWithEncLevel(maxPayloadSize protocol.ByteCount, ackAllowed bool) *payload { - payload := p.composeNextPacket(maxPayloadSize, ackAllowed) +func (p *packetPacker) maybeGetShortHeaderPacket(sealer handshake.ShortHeaderSealer, maxPacketSize protocol.ByteCount, onlyAck, ackAllowed bool) (*wire.ExtendedHeader, *payload) { + hdr := p.getShortHeader(sealer.KeyPhase()) + maxPayloadSize := maxPacketSize - hdr.GetLength(p.version) - protocol.ByteCount(sealer.Overhead()) + payload := p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, ackAllowed) + return hdr, payload +} + +func (p *packetPacker) maybeGetAppDataPacket(maxPayloadSize protocol.ByteCount, onlyAck, ackAllowed bool) *payload { + payload := p.composeNextPacket(maxPayloadSize, onlyAck, ackAllowed) // check if we have anything to send if len(payload.frames) == 0 { @@ -611,35 +586,47 @@ func (p *packetPacker) maybeGetAppDataPacketWithEncLevel(maxPayloadSize protocol return payload } -func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, ackAllowed bool) *payload { - payload := &payload{frames: make([]ackhandler.Frame, 0, 1)} - - var hasDatagram bool - if p.datagramQueue != nil { - if datagram := p.datagramQueue.Get(); datagram != nil { - payload.frames = append(payload.frames, ackhandler.Frame{ - Frame: datagram, - // set it to a no-op. Then we won't set the default callback, which would retransmit the frame. 
- OnLost: func(wire.Frame) {}, - }) - payload.length += datagram.Length(p.version) - hasDatagram = true +func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAck, ackAllowed bool) *payload { + if onlyAck { + if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, true); ack != nil { + payload := &payload{} + payload.ack = ack + payload.length += ack.Length(p.version) + return payload } + return &payload{} } - var ack *wire.AckFrame + payload := &payload{frames: make([]ackhandler.Frame, 0, 1)} + hasData := p.framer.HasData() hasRetransmission := p.retransmissionQueue.HasAppData() - // TODO: make sure ACKs are sent when a lot of DATAGRAMs are queued - if !hasDatagram && ackAllowed { - ack = p.acks.GetAckFrame(protocol.Encryption1RTT, !hasRetransmission && !hasData) - if ack != nil { + + var hasAck bool + if ackAllowed { + if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, !hasRetransmission && !hasData); ack != nil { payload.ack = ack payload.length += ack.Length(p.version) + hasAck = true } } - if ack == nil && !hasData && !hasRetransmission { + if p.datagramQueue != nil { + if f := p.datagramQueue.Peek(); f != nil { + size := f.Length(p.version) + if size <= maxFrameSize-payload.length { + payload.frames = append(payload.frames, ackhandler.Frame{ + Frame: f, + // set it to a no-op. Then we won't set the default callback, which would retransmit the frame. + OnLost: func(wire.Frame) {}, + }) + payload.length += size + p.datagramQueue.Pop() + } + } + } + + if hasAck && !hasData && !hasRetransmission { return payload } @@ -681,14 +668,14 @@ func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel) ( if err != nil { return nil, err } - hdr, payload = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), 0, protocol.EncryptionInitial) + hdr, payload = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionInitial, false, true) case protocol.EncryptionHandshake: var err error sealer, err = p.cryptoSetup.GetHandshakeSealer() if err != nil { return nil, err } - hdr, payload = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), 0, protocol.EncryptionHandshake) + hdr, payload = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionHandshake, false, true) case protocol.Encryption1RTT: oneRTTSealer, err := p.cryptoSetup.Get1RTTSealer() if err != nil { @@ -696,7 +683,7 @@ func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel) ( } sealer = oneRTTSealer hdr = p.getShortHeader(oneRTTSealer.KeyPhase()) - payload = p.maybeGetAppDataPacketWithEncLevel(p.maxPacketSize-protocol.ByteCount(sealer.Overhead())-hdr.GetLength(p.version), true) + payload = p.maybeGetAppDataPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead())-hdr.GetLength(p.version), false, true) default: panic("unknown encryption level") } @@ -742,41 +729,6 @@ func (p *packetPacker) PackMTUProbePacket(ping ackhandler.Frame, size protocol.B }, nil } -func (p *packetPacker) getSealerAndHeader(encLevel protocol.EncryptionLevel) (sealer, *wire.ExtendedHeader, error) { - switch encLevel { - case protocol.EncryptionInitial: - sealer, err := p.cryptoSetup.GetInitialSealer() - if err != nil { - return nil, nil, err - } - hdr := p.getLongHeader(protocol.EncryptionInitial) - return sealer, hdr, nil - case protocol.Encryption0RTT: - sealer, err := p.cryptoSetup.Get0RTTSealer() - if err != nil { - return nil, nil, err - } - hdr := 
p.getLongHeader(protocol.Encryption0RTT) - return sealer, hdr, nil - case protocol.EncryptionHandshake: - sealer, err := p.cryptoSetup.GetHandshakeSealer() - if err != nil { - return nil, nil, err - } - hdr := p.getLongHeader(protocol.EncryptionHandshake) - return sealer, hdr, nil - case protocol.Encryption1RTT: - sealer, err := p.cryptoSetup.Get1RTTSealer() - if err != nil { - return nil, nil, err - } - hdr := p.getShortHeader(sealer.KeyPhase()) - return sealer, hdr, nil - default: - return nil, nil, fmt.Errorf("unexpected encryption level: %s", encLevel) - } -} - func (p *packetPacker) getShortHeader(kp protocol.KeyPhaseBit) *wire.ExtendedHeader { pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) hdr := &wire.ExtendedHeader{} @@ -811,28 +763,6 @@ func (p *packetPacker) getLongHeader(encLevel protocol.EncryptionLevel) *wire.Ex return hdr } -// writeSinglePacket packs a single packet. -func (p *packetPacker) writeSinglePacket( - hdr *wire.ExtendedHeader, - payload *payload, - encLevel protocol.EncryptionLevel, - sealer sealer, -) (*packedPacket, error) { - buffer := getPacketBuffer() - var paddingLen protocol.ByteCount - if encLevel == protocol.EncryptionInitial { - paddingLen = p.initialPaddingLen(payload.frames, hdr.GetLength(p.version)+payload.length+protocol.ByteCount(sealer.Overhead())) - } - contents, err := p.appendPacket(buffer, hdr, payload, paddingLen, encLevel, sealer, false) - if err != nil { - return nil, err - } - return &packedPacket{ - buffer: buffer, - packetContents: contents, - }, nil -} - func (p *packetPacker) appendPacket(buffer *packetBuffer, header *wire.ExtendedHeader, payload *payload, padding protocol.ByteCount, encLevel protocol.EncryptionLevel, sealer sealer, isMTUProbePacket bool) (*packetContents, error) { var paddingLen protocol.ByteCount pnLen := protocol.ByteCount(header.PacketNumberLen) @@ -850,35 +780,38 @@ func (p *packetPacker) appendPacket(buffer *packetBuffer, header *wire.ExtendedH return nil, err } payloadOffset := buf.Len() + raw := buffer.Data[:payloadOffset] if payload.ack != nil { - if err := payload.ack.Write(buf, p.version); err != nil { + var err error + raw, err = payload.ack.Append(raw, p.version) + if err != nil { return nil, err } } if paddingLen > 0 { - buf.Write(make([]byte, paddingLen)) + raw = append(raw, make([]byte, paddingLen)...) 
} for _, frame := range payload.frames { - if err := frame.Write(buf, p.version); err != nil { + var err error + raw, err = frame.Append(raw, p.version) + if err != nil { return nil, err } } - if payloadSize := protocol.ByteCount(buf.Len()-payloadOffset) - paddingLen; payloadSize != payload.length { + if payloadSize := protocol.ByteCount(len(raw)-payloadOffset) - paddingLen; payloadSize != payload.length { return nil, fmt.Errorf("PacketPacker BUG: payload size inconsistent (expected %d, got %d bytes)", payload.length, payloadSize) } if !isMTUProbePacket { - if size := protocol.ByteCount(buf.Len() + sealer.Overhead()); size > p.maxPacketSize { + if size := protocol.ByteCount(len(raw) + sealer.Overhead()); size > p.maxPacketSize { return nil, fmt.Errorf("PacketPacker BUG: packet too large (%d bytes, allowed %d bytes)", size, p.maxPacketSize) } } - raw := buffer.Data // encrypt the packet - raw = raw[:buf.Len()] _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], header.PacketNumber, raw[hdrOffset:payloadOffset]) - raw = raw[0 : buf.Len()+sealer.Overhead()] + raw = raw[0 : len(raw)+sealer.Overhead()] // apply header protection pnOffset := payloadOffset - int(header.PacketNumberLen) sealer.EncryptHeader(raw[pnOffset+4:pnOffset+4+16], &raw[hdrOffset], raw[pnOffset:payloadOffset]) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/packet_unpacker.go b/vendor/github.com/Psiphon-Labs/quic-go/packet_unpacker.go index ce9ad02bb..d790aa050 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/packet_unpacker.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/packet_unpacker.go @@ -7,6 +7,7 @@ import ( "github.com/Psiphon-Labs/quic-go/internal/handshake" "github.com/Psiphon-Labs/quic-go/internal/protocol" + "github.com/Psiphon-Labs/quic-go/internal/qerr" "github.com/Psiphon-Labs/quic-go/internal/wire" ) @@ -27,7 +28,6 @@ func (e *headerParseError) Error() string { } type unpackedPacket struct { - packetNumber protocol.PacketNumber // the decoded packet number hdr *wire.ExtendedHeader encryptionLevel protocol.EncryptionLevel data []byte @@ -37,22 +37,25 @@ type unpackedPacket struct { type packetUnpacker struct { cs handshake.CryptoSetup - version protocol.VersionNumber + shortHdrConnIDLen int + version protocol.VersionNumber } var _ unpacker = &packetUnpacker{} -func newPacketUnpacker(cs handshake.CryptoSetup, version protocol.VersionNumber) unpacker { +func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int, version protocol.VersionNumber) unpacker { return &packetUnpacker{ - cs: cs, - version: version, + cs: cs, + shortHdrConnIDLen: shortHdrConnIDLen, + version: version, } } +// UnpackLongHeader unpacks a Long Header packet. // If the reserved bits are invalid, the error is wire.ErrInvalidReservedBits. // If any other error occurred when parsing the header, the error is of type headerParseError. // If decrypting the payload fails for any reason, the error is the error returned by the AEAD. 
-func (u *packetUnpacker) Unpack(hdr *wire.Header, rcvTime time.Time, data []byte) (*unpackedPacket, error) { +func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte) (*unpackedPacket, error) { var encLevel protocol.EncryptionLevel var extHdr *wire.ExtendedHeader var decrypted []byte @@ -89,30 +92,43 @@ func (u *packetUnpacker) Unpack(hdr *wire.Header, rcvTime time.Time, data []byte return nil, err } default: - if hdr.IsLongHeader { - return nil, fmt.Errorf("unknown packet type: %s", hdr.Type) - } - encLevel = protocol.Encryption1RTT - opener, err := u.cs.Get1RTTOpener() - if err != nil { - return nil, err - } - extHdr, decrypted, err = u.unpackShortHeaderPacket(opener, hdr, rcvTime, data) - if err != nil { - return nil, err + return nil, fmt.Errorf("unknown packet type: %s", hdr.Type) + } + + if len(decrypted) == 0 { + return nil, &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "empty packet", } } return &unpackedPacket{ hdr: extHdr, - packetNumber: extHdr.PacketNumber, encryptionLevel: encLevel, data: decrypted, }, nil } +func (u *packetUnpacker) UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) { + opener, err := u.cs.Get1RTTOpener() + if err != nil { + return 0, 0, 0, nil, err + } + pn, pnLen, kp, decrypted, err := u.unpackShortHeaderPacket(opener, rcvTime, data) + if err != nil { + return 0, 0, 0, nil, err + } + if len(decrypted) == 0 { + return 0, 0, 0, nil, &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "empty packet", + } + } + return pn, pnLen, kp, decrypted, nil +} + func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, []byte, error) { - extHdr, parseErr := u.unpackHeader(opener, hdr, data) + extHdr, parseErr := u.unpackLongHeader(opener, hdr, data) // If the reserved bits are set incorrectly, we still need to continue unpacking. // This avoids a timing side-channel, which otherwise might allow an attacker // to gain information about the header encryption. @@ -131,41 +147,57 @@ func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpene return extHdr, decrypted, nil } -func (u *packetUnpacker) unpackShortHeaderPacket( - opener handshake.ShortHeaderOpener, - hdr *wire.Header, - rcvTime time.Time, - data []byte, -) (*wire.ExtendedHeader, []byte, error) { - extHdr, parseErr := u.unpackHeader(opener, hdr, data) +func (u *packetUnpacker) unpackShortHeaderPacket(opener handshake.ShortHeaderOpener, rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) { + l, pn, pnLen, kp, parseErr := u.unpackShortHeader(opener, data) // If the reserved bits are set incorrectly, we still need to continue unpacking. // This avoids a timing side-channel, which otherwise might allow an attacker // to gain information about the header encryption. 
if parseErr != nil && parseErr != wire.ErrInvalidReservedBits { - return nil, nil, parseErr + return 0, 0, 0, nil, &headerParseError{parseErr} } - extHdr.PacketNumber = opener.DecodePacketNumber(extHdr.PacketNumber, extHdr.PacketNumberLen) - extHdrLen := extHdr.ParsedLen() - decrypted, err := opener.Open(data[extHdrLen:extHdrLen], data[extHdrLen:], rcvTime, extHdr.PacketNumber, extHdr.KeyPhase, data[:extHdrLen]) + pn = opener.DecodePacketNumber(pn, pnLen) + decrypted, err := opener.Open(data[l:l], data[l:], rcvTime, pn, kp, data[:l]) if err != nil { - return nil, nil, err + return 0, 0, 0, nil, err } - if parseErr != nil { - return nil, nil, parseErr + return pn, pnLen, kp, decrypted, parseErr +} + +func (u *packetUnpacker) unpackShortHeader(hd headerDecryptor, data []byte) (int, protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, error) { + hdrLen := 1 /* first header byte */ + u.shortHdrConnIDLen + if len(data) < hdrLen+4+16 { + return 0, 0, 0, 0, fmt.Errorf("packet too small, expected at least 20 bytes after the header, got %d", len(data)-hdrLen) } - return extHdr, decrypted, nil + origPNBytes := make([]byte, 4) + copy(origPNBytes, data[hdrLen:hdrLen+4]) + // 2. decrypt the header, assuming a 4 byte packet number + hd.DecryptHeader( + data[hdrLen+4:hdrLen+4+16], + &data[0], + data[hdrLen:hdrLen+4], + ) + // 3. parse the header (and learn the actual length of the packet number) + l, pn, pnLen, kp, parseErr := wire.ParseShortHeader(data, u.shortHdrConnIDLen) + if parseErr != nil && parseErr != wire.ErrInvalidReservedBits { + return l, pn, pnLen, kp, parseErr + } + // 4. if the packet number is shorter than 4 bytes, replace the remaining bytes with the copy we saved earlier + if pnLen != protocol.PacketNumberLen4 { + copy(data[hdrLen+int(pnLen):hdrLen+4], origPNBytes[int(pnLen):]) + } + return l, pn, pnLen, kp, parseErr } // The error is either nil, a wire.ErrInvalidReservedBits or of type headerParseError. -func (u *packetUnpacker) unpackHeader(hd headerDecryptor, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, error) { - extHdr, err := unpackHeader(hd, hdr, data, u.version) +func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, error) { + extHdr, err := unpackLongHeader(hd, hdr, data, u.version) if err != nil && err != wire.ErrInvalidReservedBits { return nil, &headerParseError{err: err} } return extHdr, err } -func unpackHeader(hd headerDecryptor, hdr *wire.Header, data []byte, version protocol.VersionNumber) (*wire.ExtendedHeader, error) { +func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, version protocol.VersionNumber) (*wire.ExtendedHeader, error) { r := bytes.NewReader(data) hdrLen := hdr.ParsedLen() diff --git a/vendor/github.com/Psiphon-Labs/quic-go/quicvarint/varint.go b/vendor/github.com/Psiphon-Labs/quic-go/quicvarint/varint.go index 288bd2e57..297c6b9d0 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/quicvarint/varint.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/quicvarint/varint.go @@ -88,6 +88,25 @@ func Write(w Writer, i uint64) { } } +func Append(b []byte, i uint64) []byte { + if i <= maxVarInt1 { + return append(b, uint8(i)) + } + if i <= maxVarInt2 { + return append(b, []byte{uint8(i>>8) | 0x40, uint8(i)}...) + } + if i <= maxVarInt4 { + return append(b, []byte{uint8(i>>24) | 0x80, uint8(i >> 16), uint8(i >> 8), uint8(i)}...) 
+ } + if i <= maxVarInt8 { + return append(b, []byte{ + uint8(i>>56) | 0xc0, uint8(i >> 48), uint8(i >> 40), uint8(i >> 32), + uint8(i >> 24), uint8(i >> 16), uint8(i >> 8), uint8(i), + }...) + } + panic(fmt.Sprintf("%#x doesn't fit into 62 bits", i)) +} + // WriteWithLen writes i in the QUIC varint format with the desired length to w. func WriteWithLen(w Writer, i uint64, length protocol.ByteCount) { if length != 1 && length != 2 && length != 4 && length != 8 { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/send_queue.go b/vendor/github.com/Psiphon-Labs/quic-go/send_queue.go index 1fc8c1bf8..9eafcd374 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/send_queue.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/send_queue.go @@ -36,6 +36,13 @@ func newSendQueue(conn sendConn) sender { func (h *sendQueue) Send(p *packetBuffer) { select { case h.queue <- p: + // clear available channel if we've reached capacity + if len(h.queue) == sendQueueCapacity { + select { + case <-h.available: + default: + } + } case <-h.runStopped: default: panic("sendQueue.Send would have blocked") diff --git a/vendor/github.com/Psiphon-Labs/quic-go/send_stream.go b/vendor/github.com/Psiphon-Labs/quic-go/send_stream.go index d65010674..9658bf5b5 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/send_stream.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/send_stream.go @@ -122,7 +122,7 @@ func (s *sendStream) Write(p []byte) (int, error) { var copied bool var deadline time.Time // As soon as dataForWriting becomes smaller than a certain size x, we copy all the data to a STREAM frame (s.nextFrame), - // which can the be popped the next time we assemble a packet. + // which can then be popped the next time we assemble a packet. // This allows us to return Write() when all data but x bytes have been sent out. // When the user now calls Close(), this is much more likely to happen before we popped that last STREAM frame, // allowing us to set the FIN bit on that frame (instead of sending an empty STREAM frame with FIN). 
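Note on the quicvarint change above: the new quicvarint.Append (vendor/github.com/Psiphon-Labs/quic-go/quicvarint/varint.go) appends the QUIC variable-length integer encoding of a value to a byte slice, complementing the Writer-based helpers in that file such as WriteWithLen. A minimal usage sketch follows; it is not part of this patch, assumes the vendored import path is importable, and uses the four-byte sample value from RFC 9000, Appendix A.1:

package main

import (
	"fmt"

	"github.com/Psiphon-Labs/quic-go/quicvarint"
)

func main() {
	// 494878333 requires a four-byte encoding; the two most significant bits
	// of the first byte (0b10) encode the length (RFC 9000, Section 16).
	b := quicvarint.Append(nil, 494878333)
	fmt.Printf("% x\n", b) // prints: 9d 7f 3e 7d
}
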
diff --git a/vendor/github.com/Psiphon-Labs/quic-go/server.go b/vendor/github.com/Psiphon-Labs/quic-go/server.go index bb278794f..a6cffa965 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/server.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/server.go @@ -321,10 +321,37 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s } return false } + // Short header packets should never end up here in the first place + if !wire.IsLongHeaderPacket(p.data[0]) { + panic(fmt.Sprintf("misrouted packet: %#v", p.data)) + } + v, err := wire.ParseVersion(p.data) + // send a Version Negotiation Packet if the client is speaking a different protocol version + if err != nil || !protocol.IsSupportedVersion(s.config.Versions, v) { + if err != nil || p.Size() < protocol.MinUnknownVersionPacketSize { + s.logger.Debugf("Dropping a packet with an unknown version that is too small (%d bytes)", p.Size()) + if s.config.Tracer != nil { + s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnexpectedPacket) + } + return false + } + _, src, dest, err := wire.ParseArbitraryLenConnectionIDs(p.data) + if err != nil { // should never happen + s.logger.Debugf("Dropping a packet with an unknown version for which we failed to parse connection IDs") + if s.config.Tracer != nil { + s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnexpectedPacket) + } + return false + } + if !s.config.DisableVersionNegotiationPackets { + go s.sendVersionNegotiationPacket(p.remoteAddr, src, dest, p.info.OOB()) + } + return false + } // If we're creating a new connection, the packet will be passed to the connection. // The header will then be parsed again. hdr, _, _, err := wire.ParsePacket(p.data, s.config.ConnectionIDGenerator.ConnectionIDLen()) - if err != nil && err != wire.ErrUnsupportedVersion { + if err != nil { if s.config.Tracer != nil { s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropHeaderParseError) } @@ -344,28 +371,15 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s isObfuscated := s.config.ServerMaxPacketSizeAdjustment != nil && s.config.ServerMaxPacketSizeAdjustment(p.remoteAddr) > 0 if !isObfuscated && - hdr.Type == protocol.PacketTypeInitial && p.Size() < protocol.MinInitialPacketSize { + s.logger.Debugf("Dropping a packet that is too small to be a valid Initial (%d bytes)", p.Size()) if s.config.Tracer != nil { s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeInitial, p.Size(), logging.PacketDropUnexpectedPacket) } return false } - // send a Version Negotiation Packet if the client is speaking a different protocol version - if !protocol.IsSupportedVersion(s.config.Versions, hdr.Version) { - if p.Size() < protocol.MinUnknownVersionPacketSize { - s.logger.Debugf("Dropping a packet with an unknown version that is too small (%d bytes)", p.Size()) - if s.config.Tracer != nil { - s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnexpectedPacket) - } - return false - } - if !s.config.DisableVersionNegotiationPackets { - go s.sendVersionNegotiationPacket(p, hdr) - } - return false - } + if hdr.IsLongHeader && hdr.Type != protocol.PacketTypeInitial { // Drop long header packets. 
// There's little point in sending a Stateless Reset, since the client @@ -471,23 +485,24 @@ func (s *baseServer) verifyClientHelloRandom(p *receivedPacket, hdr *wire.Header // original packet data must be retained for subsequent processing. data := append([]byte(nil), p.data...) - unpacker := newPacketUnpacker(cs, protocol.Version1) - unpacked, err := unpacker.Unpack(hdr, p.rcvTime, data) + unpacker := newPacketUnpacker(cs, 0, protocol.Version1) + unpacked, err := unpacker.UnpackLongHeader(hdr, p.rcvTime, data) if err != nil { - return fmt.Errorf("verifyClientHelloRandom: Unpack: %w", err) + return fmt.Errorf("verifyClientHelloRandom: UnpackLongHeader: %w", err) } parser := wire.NewFrameParser(s.config.EnableDatagrams, protocol.Version1) - r := bytes.NewReader(unpacked.data) - for { - frame, err := parser.ParseNext(r, protocol.EncryptionInitial) + d := unpacked.data + for len(d) > 0 { + l, frame, err := parser.ParseNext(d, protocol.EncryptionInitial) if err != nil { return fmt.Errorf("verifyClientHelloRandom: ParseNext: %w", err) } if frame == nil { return errors.New("verifyClientHelloRandom: missing CRYPTO frame") } + d = d[l:] cryptoFrame, ok := frame.(*wire.CryptoFrame) if !ok { continue @@ -609,7 +624,7 @@ func (s *baseServer) handleInitialImpl(p *receivedPacket, hdr *wire.Header) erro if err != nil { return err } - s.logger.Debugf("Changing connection ID to %s.", protocol.ConnectionID(connID)) + s.logger.Debugf("Changing connection ID to %s.", connID) var conn quicConn tracingID := nextConnTracingID() if added := s.connHandler.AddWithConnID(hdr.DestConnectionID, connID, func() packetHandler { @@ -707,7 +722,7 @@ func (s *baseServer) sendRetry(remoteAddr net.Addr, hdr *wire.Header, info *pack replyHdr.DestConnectionID = hdr.SrcConnectionID replyHdr.Token = token if s.logger.Debug() { - s.logger.Debugf("Changing connection ID to %s.", protocol.ConnectionID(srcConnID)) + s.logger.Debugf("Changing connection ID to %s.", srcConnID) s.logger.Debugf("-> Sending Retry") replyHdr.Log(s.logger) } @@ -733,7 +748,7 @@ func (s *baseServer) maybeSendInvalidToken(p *receivedPacket, hdr *wire.Header) // This makes sure that we won't send it for packets that were corrupted. 
sealer, opener := handshake.NewInitialAEAD(hdr.DestConnectionID, protocol.PerspectiveServer, hdr.Version) data := p.data[:hdr.ParsedLen()+hdr.Length] - extHdr, err := unpackHeader(opener, hdr, data, hdr.Version) + extHdr, err := unpackLongHeader(opener, hdr, data, hdr.Version) if err != nil { if s.config.Tracer != nil { s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeInitial, p.Size(), logging.PacketDropHeaderParseError) @@ -781,13 +796,14 @@ func (s *baseServer) sendError(remoteAddr net.Addr, hdr *wire.Header, sealer han } payloadOffset := buf.Len() - if err := ccf.Write(buf, hdr.Version); err != nil { + raw := buf.Bytes() + raw, err := ccf.Append(raw, hdr.Version) + if err != nil { return err } - raw := buf.Bytes() _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], replyHdr.PacketNumber, raw[:payloadOffset]) - raw = raw[0 : buf.Len()+sealer.Overhead()] + raw = raw[0 : len(raw)+sealer.Overhead()] pnOffset := payloadOffset - int(replyHdr.PacketNumberLen) sealer.EncryptHeader( @@ -801,26 +817,18 @@ func (s *baseServer) sendError(remoteAddr net.Addr, hdr *wire.Header, sealer han if s.config.Tracer != nil { s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(raw)), []logging.Frame{ccf}) } - _, err := s.conn.WritePacket(raw, remoteAddr, info.OOB()) + _, err = s.conn.WritePacket(raw, remoteAddr, info.OOB()) return err } -func (s *baseServer) sendVersionNegotiationPacket(p *receivedPacket, hdr *wire.Header) { - s.logger.Debugf("Client offered version %s, sending Version Negotiation", hdr.Version) - data := wire.ComposeVersionNegotiation(hdr.SrcConnectionID, hdr.DestConnectionID, s.config.Versions) +func (s *baseServer) sendVersionNegotiationPacket(remote net.Addr, src, dest protocol.ArbitraryLenConnectionID, oob []byte) { + s.logger.Debugf("Client offered version %s, sending Version Negotiation") + + data := wire.ComposeVersionNegotiation(dest, src, s.config.Versions) if s.config.Tracer != nil { - s.config.Tracer.SentPacket( - p.remoteAddr, - &wire.Header{ - IsLongHeader: true, - DestConnectionID: hdr.SrcConnectionID, - SrcConnectionID: hdr.DestConnectionID, - }, - protocol.ByteCount(len(data)), - nil, - ) + s.config.Tracer.SentVersionNegotiationPacket(remote, src, dest, s.config.Versions) } - if _, err := s.conn.WritePacket(data, p.remoteAddr, p.info.OOB()); err != nil { + if _, err := s.conn.WritePacket(data, remote, oob); err != nil { s.logger.Debugf("Error sending Version Negotiation: %s", err) } } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/sys_conn_oob.go b/vendor/github.com/Psiphon-Labs/quic-go/sys_conn_oob.go index a02d845ee..da6273c16 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/sys_conn_oob.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/sys_conn_oob.go @@ -123,10 +123,15 @@ func newConn(c OOBCapablePacketConn) (*oobConn, error) { bc = ipv4.NewPacketConn(c) } + msgs := make([]ipv4.Message, batchSize) + for i := range msgs { + // preallocate the [][]byte + msgs[i].Buffers = make([][]byte, 1) + } oobConn := &oobConn{ OOBCapablePacketConn: c, batchConn: bc, - messages: make([]ipv4.Message, batchSize), + messages: msgs, readPos: batchSize, } for i := 0; i < batchSize; i++ { @@ -143,7 +148,7 @@ func (c *oobConn) ReadPacket() (*receivedPacket, error) { buffer := getPacketBuffer() buffer.Data = buffer.Data[:protocol.MaxPacketBufferSize] c.buffers[i] = buffer - c.messages[i].Buffers = [][]byte{c.buffers[i].Data} + c.messages[i].Buffers[0] = c.buffers[i].Data } c.readPos = 0 @@ -157,18 +162,20 @@ func (c *oobConn) 
ReadPacket() (*receivedPacket, error) { msg := c.messages[c.readPos] buffer := c.buffers[c.readPos] c.readPos++ - ctrlMsgs, err := unix.ParseSocketControlMessage(msg.OOB[:msg.NN]) - if err != nil { - return nil, err - } + + data := msg.OOB[:msg.NN] var ecn protocol.ECN var destIP net.IP var ifIndex uint32 - for _, ctrlMsg := range ctrlMsgs { - if ctrlMsg.Header.Level == unix.IPPROTO_IP { - switch ctrlMsg.Header.Type { + for len(data) > 0 { + hdr, body, remainder, err := unix.ParseOneSocketControlMessage(data) + if err != nil { + return nil, err + } + if hdr.Level == unix.IPPROTO_IP { + switch hdr.Type { case msgTypeIPTOS: - ecn = protocol.ECN(ctrlMsg.Data[0] & ecnMask) + ecn = protocol.ECN(body[0] & ecnMask) case msgTypeIPv4PKTINFO: // struct in_pktinfo { // unsigned int ipi_ifindex; /* Interface index */ @@ -177,33 +184,34 @@ func (c *oobConn) ReadPacket() (*receivedPacket, error) { // address */ // }; ip := make([]byte, 4) - if len(ctrlMsg.Data) == 12 { - ifIndex = binary.LittleEndian.Uint32(ctrlMsg.Data) - copy(ip, ctrlMsg.Data[8:12]) - } else if len(ctrlMsg.Data) == 4 { + if len(body) == 12 { + ifIndex = binary.LittleEndian.Uint32(body) + copy(ip, body[8:12]) + } else if len(body) == 4 { // FreeBSD - copy(ip, ctrlMsg.Data) + copy(ip, body) } destIP = net.IP(ip) } } - if ctrlMsg.Header.Level == unix.IPPROTO_IPV6 { - switch ctrlMsg.Header.Type { + if hdr.Level == unix.IPPROTO_IPV6 { + switch hdr.Type { case unix.IPV6_TCLASS: - ecn = protocol.ECN(ctrlMsg.Data[0] & ecnMask) + ecn = protocol.ECN(body[0] & ecnMask) case msgTypeIPv6PKTINFO: // struct in6_pktinfo { // struct in6_addr ipi6_addr; /* src/dst IPv6 address */ // unsigned int ipi6_ifindex; /* send/recv interface index */ // }; - if len(ctrlMsg.Data) == 20 { + if len(body) == 20 { ip := make([]byte, 16) - copy(ip, ctrlMsg.Data[:16]) + copy(ip, body[:16]) destIP = net.IP(ip) - ifIndex = binary.LittleEndian.Uint32(ctrlMsg.Data[16:]) + ifIndex = binary.LittleEndian.Uint32(body[16:]) } } } + data = remainder } var info *packetInfo if destIP != nil { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/tools.go b/vendor/github.com/Psiphon-Labs/quic-go/tools.go index c801db36c..90047a4a9 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/tools.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/tools.go @@ -6,6 +6,7 @@ package quic import ( // [Psiphon] // Avoid vendoring testing dependencies +// // _ "github.com/golang/mock/mockgen" -// _ "github.com/onsi/ginkgo/ginkgo" +// _ "github.com/onsi/ginkgo/v2/ginkgo" ) diff --git a/vendor/github.com/marten-seemann/qpack/README.md b/vendor/github.com/marten-seemann/qpack/README.md index a621825ac..6ba4bad4a 100644 --- a/vendor/github.com/marten-seemann/qpack/README.md +++ b/vendor/github.com/marten-seemann/qpack/README.md @@ -1,10 +1,9 @@ # QPACK [![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/marten-seemann/qpack) -[![CircleCI Build Status](https://img.shields.io/circleci/project/github/marten-seemann/qpack.svg?style=flat-square&label=CircleCI+build)](https://circleci.com/gh/marten-seemann/qpack) [![Code Coverage](https://img.shields.io/codecov/c/github/marten-seemann/qpack/master.svg?style=flat-square)](https://codecov.io/gh/marten-seemann/qpack) -This is a minimal QPACK implementation in Go. It is minimal in the sense that it doesn't use the dynamic table at all, but just the static table and (Huffman encoded) string literals. 
Wherever possible, it reuses code from the [HPACK implementation in the Go standard library](https://github.com/golang/net/tree/master/http2/hpack). +This is a minimal QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)) implementation in Go. It is minimal in the sense that it doesn't use the dynamic table at all, but just the static table and (Huffman encoded) string literals. Wherever possible, it reuses code from the [HPACK implementation in the Go standard library](https://github.com/golang/net/tree/master/http2/hpack). It should be able to interoperate with other QPACK implemetations (both encoders and decoders), however it won't achieve a high compression efficiency. diff --git a/vendor/github.com/marten-seemann/qpack/encoder.go b/vendor/github.com/marten-seemann/qpack/encoder.go index 13e0ad2c7..ad6953537 100644 --- a/vendor/github.com/marten-seemann/qpack/encoder.go +++ b/vendor/github.com/marten-seemann/qpack/encoder.go @@ -51,9 +51,9 @@ func (e *Encoder) WriteField(f HeaderField) error { e.writeLiteralFieldWithoutNameReference(f) } - e.w.Write(e.buf) + _, err := e.w.Write(e.buf) e.buf = e.buf[:0] - return nil + return err } // Close declares that the encoding is complete and resets the Encoder diff --git a/vendor/github.com/marten-seemann/qpack/static_table.go b/vendor/github.com/marten-seemann/qpack/static_table.go index 930e83c7c..73c365e1f 100644 --- a/vendor/github.com/marten-seemann/qpack/static_table.go +++ b/vendor/github.com/marten-seemann/qpack/static_table.go @@ -104,6 +104,7 @@ var staticTableEntries = [...]HeaderField{ // Only needed for tests. // use go:linkname to retrieve the static table. +// //nolint:deadcode,unused func getStaticTable() []HeaderField { return staticTableEntries[:] diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 453a942c5..3865943f6 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -52,6 +52,20 @@ func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) { return msgs, nil } +// ParseOneSocketControlMessage parses a single socket control message from b, returning the message header, +// message data (a slice of b), and the remainder of b after that single message. +// When there are no remaining messages, len(remainder) == 0. 
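+//
+// Illustrative usage (a sketch, not taken verbatim from any caller): all
+// control messages in a buffer can be drained by looping on the remainder,
+// which is how the oobConn packet reader in this patch consumes them:
+//
+//	for len(oob) > 0 {
+//		hdr, body, rest, err := ParseOneSocketControlMessage(oob)
+//		if err != nil {
+//			return err
+//		}
+//		// inspect hdr.Level, hdr.Type and body here
+//		oob = rest
+//	}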
+func ParseOneSocketControlMessage(b []byte) (hdr Cmsghdr, data []byte, remainder []byte, err error) { + h, dbuf, err := socketControlMessageHeaderAndData(b) + if err != nil { + return Cmsghdr{}, nil, nil, err + } + if i := cmsgAlignOf(int(h.Len)); i < len(b) { + remainder = b[i:] + } + return *h, dbuf, remainder, nil +} + func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) { h := (*Cmsghdr)(unsafe.Pointer(&b[0])) if h.Len < SizeofCmsghdr || uint64(h.Len) > uint64(len(b)) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index e044d5b54..c5a98440e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1554,6 +1554,7 @@ func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Sockle var iova [1]Iovec iova[0].Base = &dummy iova[0].SetLen(1) + iov = iova[:] } } msg.Control = &oob[0] diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml deleted file mode 100644 index 04d4dae09..000000000 --- a/vendor/gopkg.in/yaml.v3/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go - -go: - - "1.4.x" - - "1.5.x" - - "1.6.x" - - "1.7.x" - - "1.8.x" - - "1.9.x" - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - "tip" - -go_import_path: gopkg.in/yaml.v3 diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go index 65846e674..ae7d049f1 100644 --- a/vendor/gopkg.in/yaml.v3/apic.go +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -108,6 +108,7 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, } } diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go index be63169b7..0173b6982 100644 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -35,6 +35,7 @@ type parser struct { doc *Node anchors map[string]*Node doneInit bool + textless bool } func newParser(b []byte) *parser { @@ -99,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t { if p.event.typ != yaml_NO_EVENT { return p.event.typ } - if !yaml_parser_parse(&p.parser, &p.event) { + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). 
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { p.fail() } return p.event.typ @@ -108,14 +112,18 @@ func (p *parser) peek() yaml_event_type_t { func (p *parser) fail() { var where string var line int - if p.parser.problem_mark.line != 0 { + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line // Scanner errors don't iterate line before returning error if p.parser.error == yaml_SCANNER_ERROR { line++ } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line } if line != 0 { where = "line " + strconv.Itoa(line) + ": " @@ -169,17 +177,20 @@ func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { } else if kind == ScalarNode { tag, _ = resolve("", value) } - return &Node{ - Kind: kind, - Tag: tag, - Value: value, - Style: style, - Line: p.event.start_mark.line + 1, - Column: p.event.start_mark.column + 1, - HeadComment: string(p.event.head_comment), - LineComment: string(p.event.line_comment), - FootComment: string(p.event.foot_comment), + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) } + return n } func (p *parser) parseChild(parent *Node) *Node { @@ -312,6 +323,8 @@ type decoder struct { decodeCount int aliasCount int aliasDepth int + + mergedFields map[interface{}]bool } var ( @@ -497,8 +510,13 @@ func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { good = d.mapping(n, out) case SequenceNode: good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough default: - panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind))) + failf("cannot decode node with unknown kind %d", n.Kind) } return good } @@ -533,6 +551,17 @@ func resetMap(out reflect.Value) { } } +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + func (d *decoder) scalar(n *Node, out reflect.Value) bool { var tag string var resolved interface{} @@ -550,14 +579,7 @@ func (d *decoder) scalar(n *Node, out reflect.Value) bool { } } if resolved == nil { - if out.CanAddr() { - switch out.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - out.Set(reflect.Zero(out.Type())) - return true - } - } - return false + return d.null(out) } if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { // We've resolved to exactly the type we want, so use that. 
@@ -791,16 +813,30 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false if out.IsNil() { out.Set(reflect.MakeMap(outt)) + mapIsNew = true } for i := 0; i < l; i += 2 { if isMerge(n.Content[i]) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } k := reflect.New(kt).Elem() if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } kkind := k.Kind() if kkind == reflect.Interface { kkind = k.Elem().Kind() @@ -809,11 +845,17 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { failf("invalid map key: %#v", k.Interface()) } e := reflect.New(et).Elem() - if d.unmarshal(n.Content[i+1], e) { + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { out.SetMapIndex(k, e) } } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + d.stringMapType = stringMapType d.generalMapType = generalMapType return true @@ -825,7 +867,8 @@ func isStringMap(n *Node) bool { } l := len(n.Content) for i := 0; i < l; i += 2 { - if n.Content[i].ShortTag() != strTag { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { return false } } @@ -842,7 +885,6 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { var elemType reflect.Type if sinfo.InlineMap != -1 { inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) elemType = inlineMap.Type().Elem() } @@ -851,6 +893,9 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.prepare(n, field) } + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node var doneFields []bool if d.uniqueKeys { doneFields = make([]bool, len(sinfo.FieldsList)) @@ -860,13 +905,20 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { for i := 0; i < l; i += 2 { ni := n.Content[i] if isMerge(ni) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } if !d.unmarshal(ni, name) { continue } - if info, ok := sinfo.FieldsMap[name.String()]; ok { + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { if d.uniqueKeys { if doneFields[info.Id] { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) @@ -892,6 +944,11 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } return true } @@ -899,19 +956,29 @@ func failWantMap() { failf("map merge requires map or sequence of maps as the value") } -func (d *decoder) merge(n *Node, out reflect.Value) { - switch n.Kind { +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + } + } + + switch merge.Kind { case MappingNode: - 
d.unmarshal(n, out) + d.unmarshal(merge, out) case AliasNode: - if n.Alias != nil && n.Alias.Kind != MappingNode { + if merge.Alias != nil && merge.Alias.Kind != MappingNode { failWantMap() } - d.unmarshal(n, out) + d.unmarshal(merge, out) case SequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.Content) - 1; i >= 0; i-- { - ni := n.Content[i] + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] if ni.Kind == AliasNode { if ni.Alias != nil && ni.Alias.Kind != MappingNode { failWantMap() @@ -924,6 +991,8 @@ func (d *decoder) merge(n *Node, out reflect.Value) { default: failWantMap() } + + d.mergedFields = mergedFields } func isMerge(n *Node) bool { diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go index ab2a06619..0f47c9ca8 100644 --- a/vendor/gopkg.in/yaml.v3/emitterc.go +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -235,10 +235,13 @@ func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool emitter.indent = 0 } } else if !indentless { - emitter.indent += emitter.best_indent - // [Go] If inside a block sequence item, discount the space taken by the indicator. - if emitter.best_indent > 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { - emitter.indent -= 2 + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) } } return true @@ -725,16 +728,9 @@ func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_e // Expect a block item node. func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { - // [Go] The original logic here would not indent the sequence when inside a mapping. - // In Go we always indent it, but take the sequence indicator out of the indentation. - indentless := emitter.best_indent == 2 && emitter.mapping_context && (emitter.column == 0 || !emitter.indention) - original := emitter.indent - if !yaml_emitter_increase_indent(emitter, false, indentless) { + if !yaml_emitter_increase_indent(emitter, false, false) { return false } - if emitter.indent > original+2 { - emitter.indent -= 2 - } } if event.typ == yaml_SEQUENCE_END_EVENT { emitter.indent = emitter.indents[len(emitter.indents)-1] @@ -785,6 +781,13 @@ func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_ev if !yaml_emitter_write_indent(emitter) { return false } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. 
+ emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } if yaml_emitter_check_simple_key(emitter) { emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, true) @@ -810,6 +813,27 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ return false } } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { return false @@ -823,6 +847,10 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ return true } +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + // Expect a node. func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, root bool, sequence bool, mapping bool, simple_key bool) bool { @@ -1866,7 +1894,7 @@ func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - if !put_break(emitter) { + if !yaml_emitter_process_line_comment(emitter) { return false } //emitter.indention = true @@ -1903,10 +1931,10 @@ func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) boo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - - if !put_break(emitter) { + if !yaml_emitter_process_line_comment(emitter) { return false } + //emitter.indention = true emitter.whitespace = true diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go index 1f37271ce..de9e72a3e 100644 --- a/vendor/gopkg.in/yaml.v3/encode.go +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -119,6 +119,14 @@ func (e *encoder) marshal(tag string, in reflect.Value) { case *Node: e.nodev(in) return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return case time.Time: e.timev(tag, in) return @@ -422,18 +430,23 @@ func (e *encoder) nodev(in reflect.Value) { } func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. 
+ if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + // If the tag was not explicitly requested, and dropping it won't change the // implicit tag of the value, don't include it in the presentation. var tag = node.Tag var stag = shortTag(tag) - var rtag string var forceQuoting bool if tag != "" && node.Style&TaggedStyle == 0 { if node.Kind == ScalarNode { if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { tag = "" } else { - rtag, _ = resolve("", node.Value) + rtag, _ := resolve("", node.Value) if rtag == stag { tag = "" } else if stag == strTag { @@ -442,6 +455,7 @@ func (e *encoder) node(node *Node, tail string) { } } } else { + var rtag string switch node.Kind { case MappingNode: rtag = mapTag @@ -471,7 +485,7 @@ func (e *encoder) node(node *Node, tail string) { if node.Style&FlowStyle != 0 { style = yaml_FLOW_SEQUENCE_STYLE } - e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)) + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) e.event.head_comment = []byte(node.HeadComment) e.emit() for _, node := range node.Content { @@ -487,7 +501,7 @@ func (e *encoder) node(node *Node, tail string) { if node.Style&FlowStyle != 0 { style = yaml_FLOW_MAPPING_STYLE } - yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style) + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) e.event.tail_comment = []byte(tail) e.event.head_comment = []byte(node.HeadComment) e.emit() @@ -528,11 +542,11 @@ func (e *encoder) node(node *Node, tail string) { case ScalarNode: value := node.Value if !utf8.ValidString(value) { - if tag == binaryTag { + if stag == binaryTag { failf("explicitly tagged !!binary data must be base64-encoded") } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) } // It can't be encoded directly as YAML so use a binary tag // and encode it as base64. 
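As a general illustration of the !!binary fallback referenced in the comment above (the general string path, not the Node-specific branch changed here), a string value that is not valid UTF-8 is emitted as base64 under the binary tag; a small sketch, assuming the vendored gopkg.in/yaml.v3 import path:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// A string value that is not valid UTF-8 cannot be emitted directly, so
	// the encoder falls back to the !!binary tag with base64-encoded content.
	out, err := yaml.Marshal(map[string]string{"payload": "\xff\xfe"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // payload: !!binary //4=
}
```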
@@ -557,5 +571,7 @@ func (e *encoder) node(node *Node, tail string) { } e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) } } diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go index aea9050b8..268558a0d 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -648,6 +648,10 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } return true } if len(anchor) > 0 || len(tag) > 0 { @@ -683,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -694,25 +701,13 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark - prior_head := len(parser.head_comment) + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false } - if prior_head > 0 && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - // [Go] It's a sequence under a sequence entry, so the former head comment - // is for the list itself, not the first list item under it. - parser.stem_comment = parser.head_comment[:prior_head] - if len(parser.head_comment) == prior_head { - parser.head_comment = nil - } else { - // Copy suffix to prevent very strange bugs if someone ever appends - // further bytes to the prefix in the stem_comment slice above. - parser.head_comment = append([]byte(nil), parser.head_comment[prior_head+1:]...) - } - - } if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) return yaml_parser_parse_node(parser, event, true, false) @@ -754,7 +749,9 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false @@ -780,6 +777,32 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y return true } +// Split stem comment from head comment. +// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. 
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // ******************* @@ -793,6 +816,9 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -902,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go index 57e954ca5..ca0070108 100644 --- a/vendor/gopkg.in/yaml.v3/scannerc.go +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -749,6 +749,11 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { if !ok { return } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } if !yaml_parser_scan_line_comment(parser, comment_mark) { ok = false return @@ -2255,10 +2260,9 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l } } if parser.buffer[parser.buffer_pos] == '#' { - // TODO Test this and then re-enable it. 
- //if !yaml_parser_scan_line_comment(parser, start_mark) { - // return false - //} + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } for !is_breakz(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -2856,13 +2860,12 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t return false } skip_line(parser) - } else { - if parser.mark.index >= seen { - if len(text) == 0 { - start_mark = parser.mark - } - text = append(text, parser.buffer[parser.buffer_pos]) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark } + text = read(parser, text) + } else { skip(parser) } } @@ -2888,6 +2891,10 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo var token_mark = token.start_mark var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } var recent_empty = false var first_empty = parser.newlines <= 1 @@ -2919,15 +2926,18 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo continue } c := parser.buffer[parser.buffer_pos+peek] - if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') { + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { // Got line break or terminator. - if !recent_empty { - if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) { + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { // This is the first empty line and there were no empty lines before, // so this initial part of the comment is a foot of the prior token // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. if len(text) > 0 { - if start_mark.column-1 < parser.indent { + if start_mark.column-1 < next_indent { // If dedented it's unrelated to the prior token. token_mark = start_mark } @@ -2958,7 +2968,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo continue } - if len(text) > 0 && column < parser.indent+1 && column != start_mark.column { + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { // The comment at the different indentation is a foot of the // preceding data rather than a head of the upcoming one. 
parser.comments = append(parser.comments, yaml_comment_t{ @@ -2999,10 +3009,9 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo return false } skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) } else { - if parser.mark.index >= seen { - text = append(text, parser.buffer[parser.buffer_pos]) - } skip(parser) } } @@ -3010,6 +3019,10 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo peek = 0 column = 0 line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } } if len(text) > 0 { diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go index b5d35a50d..8cec6da48 100644 --- a/vendor/gopkg.in/yaml.v3/yaml.go +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -89,7 +89,7 @@ func Unmarshal(in []byte, out interface{}) (err error) { return unmarshal(in, out, false) } -// A Decorder reads and decodes YAML values from an input stream. +// A Decoder reads and decodes YAML values from an input stream. type Decoder struct { parser *parser knownFields bool @@ -194,7 +194,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // Zero valued structs will be omitted if all their public // fields are zero, unless they implement an IsZero // method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. +// case the field will be excluded if IsZero returns true. // // flow Marshal using a flow style (useful for structs, // sequences and maps). @@ -252,6 +252,24 @@ func (e *Encoder) Encode(v interface{}) (err error) { return nil } +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. +func (n *Node) Encode(v interface{}) (err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(v)) + e.finish() + p := newParser(e.out) + p.textless = true + defer p.destroy() + doc := p.parse() + *n = *doc.Content[0] + return nil +} + // SetIndent changes the used indentation used when encoding. func (e *Encoder) SetIndent(spaces int) { if spaces < 0 { @@ -328,6 +346,12 @@ const ( // and maps, Node is an intermediate representation that allows detailed // control over the content being decoded or encoded. // +// It's worth noting that although Node offers access into details such as +// line numbers, colums, and comments, the content when re-encoded will not +// have its original textual representation preserved. An effort is made to +// render the data plesantly, and to preserve comments near the data they +// describe, though. +// // Values that make use of the Node type interact with the yaml package in the // same way any other type would do, by encoding and decoding yaml data // directly or indirectly into them. @@ -391,6 +415,13 @@ type Node struct { Column int } +// IsZero returns whether the node has all of its fields unset. +func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 +} + + // LongTag returns the long form of the tag that indicates the data type for // the node. If the Tag field isn't explicitly defined, one will be computed // based on the node properties. 
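The Node additions around this point (Encode, IsZero, and the zero-node handling in ShortTag just below) can be exercised directly; a minimal sketch against the vendored gopkg.in/yaml.v3:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// Node.Encode builds an intermediate Node from an arbitrary Go value.
	var n yaml.Node
	if err := n.Encode(map[string]int{"retries": 3}); err != nil {
		panic(err)
	}
	out, err := yaml.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // retries: 3

	// A zero Node reports IsZero and resolves to the null tag, so it encodes
	// and decodes as a YAML null.
	var zero yaml.Node
	fmt.Println(zero.IsZero(), zero.ShortTag()) // true !!null
}
```

Node.Encode is the counterpart of the existing Node.Decode: it fills a Node from a Go value, so callers can adjust structure or comments before marshalling.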
@@ -418,6 +449,11 @@ func (n *Node) ShortTag() string { case ScalarNode: tag, _ := resolve("", n.Value) return tag + case 0: + // Special case to make the zero value convenient. + if n.IsZero() { + return nullTag + } } return "" } diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go index 2719cfbb0..7c6d00770 100644 --- a/vendor/gopkg.in/yaml.v3/yamlh.go +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -787,6 +787,8 @@ type yaml_emitter_t struct { foot_comment []byte tail_comment []byte + key_line_comment []byte + // Dumper stuff opened bool // If the stream was already opened? diff --git a/vendor/modules.txt b/vendor/modules.txt index 4a48487a9..3ff799bac 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -22,7 +22,7 @@ github.com/Psiphon-Labs/qtls-go1-18 # github.com/Psiphon-Labs/qtls-go1-19 v0.0.0-20221014165721-ed28749db082 ## explicit; go 1.18 github.com/Psiphon-Labs/qtls-go1-19 -# github.com/Psiphon-Labs/quic-go v0.0.0-20221014165902-1b7c3975fcf3 +# github.com/Psiphon-Labs/quic-go v0.0.0-20230124165616-fe8e9a215a66 ## explicit; go 1.18 github.com/Psiphon-Labs/quic-go github.com/Psiphon-Labs/quic-go/http3 @@ -149,8 +149,8 @@ github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/marten-seemann/qpack v0.2.1 -## explicit; go 1.14 +# github.com/marten-seemann/qpack v0.3.0 +## explicit; go 1.18 github.com/marten-seemann/qpack # github.com/marusama/semaphore v0.0.0-20171214154724-565ffd8e868a ## explicit @@ -271,7 +271,7 @@ golang.org/x/net/trace # golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 ## explicit golang.org/x/sync/errgroup -# golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43 +# golang.org/x/sys v0.2.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -342,7 +342,7 @@ google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb -# gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c +# gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 # honnef.co/go/tools v0.2.1 From af93d503879dc612cf553c0bf46cb98867238486 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Tue, 24 Jan 2023 12:42:51 -0500 Subject: [PATCH 04/44] Update UDP socket comment --- psiphon/UDPConn.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/psiphon/UDPConn.go b/psiphon/UDPConn.go index 9841e1043..2bc3854bb 100644 --- a/psiphon/UDPConn.go +++ b/psiphon/UDPConn.go @@ -119,19 +119,20 @@ func NewUDPConn( network = "udp6" } - // It's necessary to create an unbound UDP socket, for use with WriteTo, - // as required by quic-go. As documented in net.ListenUDP: with an - // unspecified IP address, the resulting conn "listens on all available - // IP addresses of the local system except multicast IP addresses". + // It's necessary to create an "unconnected" UDP socket, for use with + // WriteTo, as required by quic-go. As documented in net.ListenUDP: with + // an unspecified IP address, the resulting conn "listens on all + // available IP addresses of the local system except multicast IP + // addresses". // // Limitation: these UDP sockets are not necessarily closed when a device // changes active network (e.g., WiFi to mobile). 
It's possible that a // QUIC connection does not immediately close on a network change, and // instead outbound packets are sent from a different active interface. // As quic-go does not yet support connection migration, these packets - // will be dropped by the server. This situation is mitigated by network - // change event detection, which initiates new tunnel connections, and by - // timeouts/keep-alives. + // will be dropped by the server. This situation is mitigated by use of + // DeviceBinder; by network change event detection, which initiates new + // tunnel connections; and by timeouts/keep-alives. conn, err := listen.ListenPacket(ctx, network, "") if err != nil { From e8b9f0e99dca5ada9f22d06d5033157df3a61c47 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Fri, 27 Jan 2023 12:10:39 -0500 Subject: [PATCH 05/44] Add device_region to homepageQueryParameterSubstitution --- psiphon/server/api.go | 11 ++++++++++- psiphon/server/psinet/psinet.go | 29 +++++++++++++++++------------ psiphon/server/tunnelServer.go | 4 +++- 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/psiphon/server/api.go b/psiphon/server/api.go index 48b9369e5..1360147e9 100644 --- a/psiphon/server/api.go +++ b/psiphon/server/api.go @@ -241,6 +241,11 @@ func handshakeAPIRequestHandler( } } + deviceRegion, ok := getOptionalStringRequestParam(params, "device_region") + if !ok { + deviceRegion = GEOIP_UNKNOWN_VALUE + } + // Note: no guarantee that PsinetDatabase won't reload between database calls db := support.PsinetDatabase @@ -261,6 +266,7 @@ func handshakeAPIRequestHandler( domainBytesChecksum: domainBytesChecksum, establishedTunnelsCount: establishedTunnelsCount, splitTunnelLookup: splitTunnelLookup, + deviceRegion: deviceRegion, }, authorizations) if err != nil { @@ -367,9 +373,12 @@ func handshakeAPIRequestHandler( // the JSON response, return an empty array instead of null for legacy // clients. + homepages := db.GetRandomizedHomepages( + sponsorID, geoIPData.Country, geoIPData.ASN, deviceRegion, isMobile) + handshakeResponse := protocol.HandshakeResponse{ SSHSessionID: sessionID, - Homepages: db.GetRandomizedHomepages(sponsorID, geoIPData.Country, geoIPData.ASN, isMobile), + Homepages: homepages, UpgradeClientVersion: db.GetUpgradeClientVersion(clientVersion, normalizedPlatform), PageViewRegexes: make([]map[string]string, 0), HttpsRequestRegexes: httpsRequestRegexes, diff --git a/psiphon/server/psinet/psinet.go b/psiphon/server/psinet/psinet.go index 90976dd8f..cda2407c6 100644 --- a/psiphon/server/psinet/psinet.go +++ b/psiphon/server/psinet/psinet.go @@ -137,9 +137,9 @@ func NewDatabase(filename string) (*Database, error) { // GetRandomizedHomepages returns a randomly ordered list of home pages // for the specified sponsor, region, and platform. func (db *Database) GetRandomizedHomepages( - sponsorID, clientRegion, clientASN string, isMobilePlatform bool) []string { + sponsorID, clientRegion, clientASN, deviceRegion string, isMobilePlatform bool) []string { - homepages := db.GetHomepages(sponsorID, clientRegion, clientASN, isMobilePlatform) + homepages := db.GetHomepages(sponsorID, clientRegion, clientASN, deviceRegion, isMobilePlatform) if len(homepages) > 1 { shuffledHomepages := make([]string, len(homepages)) perm := rand.Perm(len(homepages)) @@ -154,7 +154,7 @@ func (db *Database) GetRandomizedHomepages( // GetHomepages returns a list of home pages for the specified sponsor, // region, and platform. 
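To make the new device_region parameter concrete, here is a rough standalone sketch of the substitution performed by homepageQueryParameterSubstitution, which follows below; the URL and values are hypothetical:

```go
package main

import (
	"fmt"
	"strings"
)

// substitute mirrors homepageQueryParameterSubstitution: each placeholder
// ("XX") is replaced at most once with the caller-supplied value.
func substitute(url, clientRegion, clientASN, deviceRegion string) string {
	url = strings.Replace(url, "client_region=XX", "client_region="+clientRegion, 1)
	url = strings.Replace(url, "client_asn=XX", "client_asn="+clientASN, 1)
	url = strings.Replace(url, "device_region=XX", "device_region="+deviceRegion, 1)
	return url
}

func main() {
	// Hypothetical sponsor home page URL carrying all three placeholders.
	u := "https://landing.example.com/?client_region=XX&client_asn=XX&device_region=XX"
	fmt.Println(substitute(u, "CA", "12345", "US"))
	// https://landing.example.com/?client_region=CA&client_asn=12345&device_region=US
}
```

Each placeholder is replaced at most once, matching the n=1 argument to strings.Replace in the helper.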
func (db *Database) GetHomepages( - sponsorID, clientRegion, clientASN string, isMobilePlatform bool) []string { + sponsorID, clientRegion, clientASN, deviceRegion string, isMobilePlatform bool) []string { db.ReloadableFile.RLock() defer db.ReloadableFile.RUnlock() @@ -187,7 +187,8 @@ func (db *Database) GetHomepages( if ok { for _, homePage := range homePagesByRegion { sponsorHomePages = append( - sponsorHomePages, homepageQueryParameterSubstitution(homePage.URL, clientRegion, clientASN)) + sponsorHomePages, homepageQueryParameterSubstitution( + homePage.URL, clientRegion, clientASN, deviceRegion)) } } @@ -198,7 +199,8 @@ func (db *Database) GetHomepages( for _, homePage := range defaultHomePages { // client_region query parameter substitution sponsorHomePages = append( - sponsorHomePages, homepageQueryParameterSubstitution(homePage.URL, clientRegion, clientASN)) + sponsorHomePages, homepageQueryParameterSubstitution( + homePage.URL, clientRegion, clientASN, deviceRegion)) } } } @@ -207,17 +209,18 @@ func (db *Database) GetHomepages( } func homepageQueryParameterSubstitution( - url, clientRegion, clientASN string) string { + url, clientRegion, clientASN, deviceRegion string) string { - return strings.Replace( - strings.Replace(url, "client_region=XX", "client_region="+clientRegion, 1), - "client_asn=XX", "client_asn="+clientASN, 1) + url = strings.Replace(url, "client_region=XX", "client_region="+clientRegion, 1) + url = strings.Replace(url, "client_asn=XX", "client_asn="+clientASN, 1) + url = strings.Replace(url, "device_region=XX", "device_region="+deviceRegion, 1) + return url } // GetAlertActionURLs returns a list of alert action URLs for the specified // alert reason and sponsor. func (db *Database) GetAlertActionURLs( - alertReason, sponsorID, clientRegion, clientASN string) []string { + alertReason, sponsorID, clientRegion, clientASN, deviceRegion string) []string { db.ReloadableFile.RLock() defer db.ReloadableFile.RUnlock() @@ -231,14 +234,16 @@ func (db *Database) GetAlertActionURLs( if sponsor != nil { for _, URL := range sponsor.AlertActionURLs[alertReason] { actionURLs = append( - actionURLs, homepageQueryParameterSubstitution(URL, clientRegion, clientASN)) + actionURLs, homepageQueryParameterSubstitution( + URL, clientRegion, clientASN, deviceRegion)) } } if len(actionURLs) == 0 { for _, URL := range db.DefaultAlertActionURLs[alertReason] { actionURLs = append( - actionURLs, homepageQueryParameterSubstitution(URL, clientRegion, clientASN)) + actionURLs, homepageQueryParameterSubstitution( + URL, clientRegion, clientASN, deviceRegion)) } } diff --git a/psiphon/server/tunnelServer.go b/psiphon/server/tunnelServer.go index ec74bf11c..369074218 100644 --- a/psiphon/server/tunnelServer.go +++ b/psiphon/server/tunnelServer.go @@ -1560,6 +1560,7 @@ type handshakeState struct { domainBytesChecksum []byte establishedTunnelsCount int splitTunnelLookup *splitTunnelLookup + deviceRegion string } type destinationBytesMetrics struct { @@ -3194,7 +3195,8 @@ func (sshClient *sshClient) getAlertActionURLs(alertReason string) []string { alertReason, sponsorID, sshClient.geoIPData.Country, - sshClient.geoIPData.ASN) + sshClient.geoIPData.ASN, + sshClient.handshakeState.deviceRegion) } func (sshClient *sshClient) rejectNewChannel(newChannel ssh.NewChannel, logMessage string) { From 439efab6b7812f85ee55cba063741f5a02598e0c Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Fri, 27 Jan 2023 12:31:40 -0500 Subject: [PATCH 06/44] Fail to start for non-working fronted configurations --- 
psiphon/server/config.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/psiphon/server/config.go b/psiphon/server/config.go index b6e9e21d6..2cec6b332 100644 --- a/psiphon/server/config.go +++ b/psiphon/server/config.go @@ -588,6 +588,20 @@ func LoadConfig(configJSON []byte) (*Config, error) { tunnelProtocol) } } + + // For FRONTED QUIC and HTTP, HTTPS is always used on the + // edge-to-server hop, so it must be enabled or else this + // configuration will not work. There is no FRONTED QUIC listener at + // all; see TunnelServer.Run. + if protocol.TunnelProtocolUsesFrontedMeek(tunnelProtocol) { + _, ok := config.TunnelProtocolPorts[protocol.TUNNEL_PROTOCOL_FRONTED_MEEK] + if !ok { + return nil, errors.Tracef( + "Tunnel protocol %s requires %s to be enabled", + tunnelProtocol, + protocol.TUNNEL_PROTOCOL_FRONTED_MEEK) + } + } } for tunnelProtocol, address := range config.TunnelProtocolPassthroughAddresses { From d82ff79ab9babcf26b5cfcbf331e5e21259686e5 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Fri, 27 Jan 2023 12:40:33 -0500 Subject: [PATCH 07/44] Prefer getNetworkCountryIso over getSimCountryIso --- .../Android/PsiphonTunnel/PsiphonTunnel.java | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index 67cc0eb77..d52e7144f 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -1138,16 +1138,23 @@ private static String getDeviceRegion(Context context) { String region = ""; TelephonyManager telephonyManager = (TelephonyManager)context.getSystemService(Context.TELEPHONY_SERVICE); if (telephonyManager != null) { - region = telephonyManager.getSimCountryIso(); - if (region == null) { - region = ""; - } - if (region.length() == 0 && telephonyManager.getPhoneType() != TelephonyManager.PHONE_TYPE_CDMA) { + // getNetworkCountryIso, when present, is preferred over + // getSimCountryIso, since getNetworkCountryIso is the network + // the device is currently on, while getSimCountryIso is the home + // region of the SIM. While roaming, only getNetworkCountryIso + // may more accurately represent the actual device region. 
+ if (telephonyManager.getPhoneType() != TelephonyManager.PHONE_TYPE_CDMA) { region = telephonyManager.getNetworkCountryIso(); if (region == null) { region = ""; } } + if (region.length() == 0) { + region = telephonyManager.getSimCountryIso(); + if (region == null) { + region = ""; + } + } } if (region.length() == 0) { Locale defaultLocale = Locale.getDefault(); From 192f1a5755a4fd1cb335136a52aaa37bcd050ae6 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Fri, 27 Jan 2023 15:10:55 -0500 Subject: [PATCH 08/44] Use Go 1.19.5 --- .github/workflows/tests.yml | 2 +- ClientLibrary/Dockerfile | 2 +- ClientLibrary/build-darwin.sh | 4 ++-- ConsoleClient/Dockerfile | 2 +- MobileLibrary/Android/Dockerfile | 2 +- MobileLibrary/iOS/build-psiphon-framework.sh | 2 +- Server/Dockerfile-binary-builder | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 10e549dbf..cad2ea6ce 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -15,7 +15,7 @@ jobs: fail-fast: false matrix: os: [ "ubuntu" ] - go: [ "1.19.2" ] + go: [ "1.19.5" ] test-type: [ "detector", "coverage", "memory" ] runs-on: ${{ matrix.os }}-latest diff --git a/ClientLibrary/Dockerfile b/ClientLibrary/Dockerfile index d447bbf6c..16abfcf13 100644 --- a/ClientLibrary/Dockerfile +++ b/ClientLibrary/Dockerfile @@ -21,7 +21,7 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \ # Install Go. # NOTE: Go 1.10+ is required to build c-shared for windows (https://github.com/golang/go/commit/bb0bfd002ada7e3eb9198d4287b32c2fed6e8da6) -ENV GOVERSION=go1.19.2 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 +ENV GOVERSION=go1.19.5 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 RUN curl -L https://storage.googleapis.com/golang/$GOVERSION.linux-amd64.tar.gz -o /tmp/go.tar.gz \ && tar -C /usr/local -xzf /tmp/go.tar.gz \ diff --git a/ClientLibrary/build-darwin.sh b/ClientLibrary/build-darwin.sh index 9710aaa65..6e1bef92d 100755 --- a/ClientLibrary/build-darwin.sh +++ b/ClientLibrary/build-darwin.sh @@ -9,8 +9,8 @@ if [ -z ${2+x} ]; then BUILD_TAGS=""; else BUILD_TAGS="$2"; fi # Note: # clangwrap.sh needs to be updated when the Go version changes. # The last version was: -# https://github.com/golang/go/blob/go1.19.2/misc/ios/clangwrap.sh -GO_VERSION_REQUIRED="1.19.2" +# https://github.com/golang/go/blob/go1.19.5/misc/ios/clangwrap.sh +GO_VERSION_REQUIRED="1.19.5" BASE_DIR=$(cd "$(dirname "$0")" ; pwd -P) cd ${BASE_DIR} diff --git a/ConsoleClient/Dockerfile b/ConsoleClient/Dockerfile index ef20c0049..b319e14ee 100644 --- a/ConsoleClient/Dockerfile +++ b/ConsoleClient/Dockerfile @@ -22,7 +22,7 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \ && rm -rf /var/lib/apt/lists/* # Install Go. -ENV GOVERSION=go1.19.2 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 +ENV GOVERSION=go1.19.5 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 RUN curl -L https://storage.googleapis.com/golang/$GOVERSION.linux-amd64.tar.gz -o /tmp/go.tar.gz \ && tar -C /usr/local -xzf /tmp/go.tar.gz \ diff --git a/MobileLibrary/Android/Dockerfile b/MobileLibrary/Android/Dockerfile index 48b70856a..574e61b05 100644 --- a/MobileLibrary/Android/Dockerfile +++ b/MobileLibrary/Android/Dockerfile @@ -22,7 +22,7 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \ && rm -rf /var/lib/apt/lists/* # Install Go. 
-ENV GOVERSION=go1.19.2 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 +ENV GOVERSION=go1.19.5 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 RUN curl -L https://storage.googleapis.com/golang/$GOVERSION.linux-amd64.tar.gz -o /tmp/go.tar.gz \ && tar -C /usr/local -xzf /tmp/go.tar.gz \ diff --git a/MobileLibrary/iOS/build-psiphon-framework.sh b/MobileLibrary/iOS/build-psiphon-framework.sh index fb1dfaa91..5f0106b6c 100755 --- a/MobileLibrary/iOS/build-psiphon-framework.sh +++ b/MobileLibrary/iOS/build-psiphon-framework.sh @@ -17,7 +17,7 @@ set -e -u -x if [ -z ${1+x} ]; then BUILD_TAGS=""; else BUILD_TAGS="$1"; fi # Modify this value as we use newer Go versions. -GO_VERSION_REQUIRED="1.19.2" +GO_VERSION_REQUIRED="1.19.5" # At this time, psiphon-tunnel-core doesn't support modules export GO111MODULE=off diff --git a/Server/Dockerfile-binary-builder b/Server/Dockerfile-binary-builder index 2bbbbe1af..325e1c854 100644 --- a/Server/Dockerfile-binary-builder +++ b/Server/Dockerfile-binary-builder @@ -1,6 +1,6 @@ FROM alpine:3.10.2 -ENV GOLANG_VERSION 1.19.2 +ENV GOLANG_VERSION 1.19.5 ENV GOLANG_SRC_URL https://golang.org/dl/go$GOLANG_VERSION.src.tar.gz RUN set -ex \ From 42e3f41a149a9309987eb7469d9bf0e4ee193872 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Mon, 30 Jan 2023 15:00:51 -0500 Subject: [PATCH 09/44] Meek HTTP header fixes - Don't default to including X-Psiphon-Fronting-Address for non-fronted HTTPS; only include when configured by tactics - Always include any selected User-Agent (the metric user_agent already reports the selection) --- psiphon/common/parameters/parameters.go | 21 ++++++++++++ psiphon/common/protocol/protocol.go | 20 +++++++++++ psiphon/dialParameters.go | 20 ++++++++--- psiphon/meekConn.go | 45 +++++++++++++++++-------- 4 files changed, 88 insertions(+), 18 deletions(-) diff --git a/psiphon/common/parameters/parameters.go b/psiphon/common/parameters/parameters.go index 0739abe42..f4a117b04 100755 --- a/psiphon/common/parameters/parameters.go +++ b/psiphon/common/parameters/parameters.go @@ -320,6 +320,7 @@ const ( DNSResolverIncludeEDNS0Probability = "DNSResolverIncludeEDNS0Probability" DNSResolverCacheExtensionInitialTTL = "DNSResolverCacheExtensionInitialTTL" DNSResolverCacheExtensionVerifiedTTL = "DNSResolverCacheExtensionVerifiedTTL" + AddFrontingProviderPsiphonFrontingHeader = "AddFrontingProviderPsiphonFrontingHeader" ) const ( @@ -675,6 +676,8 @@ var defaultParameters = map[string]struct { DNSResolverIncludeEDNS0Probability: {value: 0.0, minimum: 0.0}, DNSResolverCacheExtensionInitialTTL: {value: time.Duration(0), minimum: time.Duration(0)}, DNSResolverCacheExtensionVerifiedTTL: {value: time.Duration(0), minimum: time.Duration(0)}, + + AddFrontingProviderPsiphonFrontingHeader: {value: protocol.LabeledTunnelProtocols{}}, } // IsServerSideOnly indicates if the parameter specified by name is used @@ -902,6 +905,15 @@ func (p *Parameters) Set( return nil, errors.Trace(err) } } + case protocol.LabeledTunnelProtocols: + if skipOnError { + newValue = v.PruneInvalid() + } else { + err := v.Validate() + if err != nil { + return nil, errors.Trace(err) + } + } case protocol.TLSProfiles: if skipOnError { newValue = v.PruneInvalid(customTLSProfileNames) @@ -1323,6 +1335,15 @@ func (p ParametersAccessor) TunnelProtocols(name string) protocol.TunnelProtocol return value } +// LabeledTunnelProtocols returns a protocol.TunnelProtocols parameter value +// corresponding to the specified labeled set and label 
value. The return +// value is nil when no set is found. +func (p ParametersAccessor) LabeledTunnelProtocols(name, label string) protocol.TunnelProtocols { + var value protocol.LabeledTunnelProtocols + p.snapshot.getValue(name, &value) + return value[label] +} + // TLSProfiles returns a protocol.TLSProfiles parameter value. // If there is a corresponding Probability value, a weighted coin flip // will be performed and, depending on the result, the value or the diff --git a/psiphon/common/protocol/protocol.go b/psiphon/common/protocol/protocol.go index 4e69c23a8..389486b60 100644 --- a/psiphon/common/protocol/protocol.go +++ b/psiphon/common/protocol/protocol.go @@ -124,6 +124,26 @@ func (t TunnelProtocols) PruneInvalid() TunnelProtocols { return u } +type LabeledTunnelProtocols map[string]TunnelProtocols + +func (labeledProtocols LabeledTunnelProtocols) Validate() error { + for _, protocols := range labeledProtocols { + err := protocols.Validate() + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +func (labeledProtocols LabeledTunnelProtocols) PruneInvalid() LabeledTunnelProtocols { + l := make(LabeledTunnelProtocols) + for label, protocols := range labeledProtocols { + l[label] = protocols.PruneInvalid() + } + return l +} + var SupportedTunnelProtocols = TunnelProtocols{ TUNNEL_PROTOCOL_SSH, TUNNEL_PROTOCOL_OBFUSCATED_SSH, diff --git a/psiphon/dialParameters.go b/psiphon/dialParameters.go index 1a013c839..1b08e38e0 100644 --- a/psiphon/dialParameters.go +++ b/psiphon/dialParameters.go @@ -934,6 +934,17 @@ func MakeDialParameters( if protocol.TunnelProtocolUsesMeek(dialParams.TunnelProtocol) || dialParams.ConjureAPIRegistration { + // For tactics requests, AddPsiphonFrontingHeader is set when set for + // the related tunnel protocol. E.g., FRONTED-OSSH-MEEK for + // FRONTED-MEEK-TACTICS. AddPsiphonFrontingHeader is not replayed. + addPsiphonFrontingHeader := false + if dialParams.FrontingProviderID != "" { + addPsiphonFrontingHeader = common.Contains( + p.LabeledTunnelProtocols( + parameters.AddFrontingProviderPsiphonFrontingHeader, dialParams.FrontingProviderID), + dialParams.TunnelProtocol) + } + dialParams.meekConfig = &MeekConfig{ DiagnosticID: serverEntry.GetDiagnosticID(), Parameters: config.GetParameters(), @@ -949,6 +960,7 @@ func MakeDialParameters( RandomizedTLSProfileSeed: dialParams.RandomizedTLSProfileSeed, UseObfuscatedSessionTickets: dialParams.TunnelProtocol == protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET, SNIServerName: dialParams.MeekSNIServerName, + AddPsiphonFrontingHeader: addPsiphonFrontingHeader, VerifyServerName: dialParams.MeekVerifyServerName, VerifyPins: dialParams.MeekVerifyPins, HostHeader: dialParams.MeekHostHeader, @@ -1066,11 +1078,11 @@ func (dialParams *DialParameters) GetTLSVersionForMetrics() string { // There are two concerns regarding which dial parameter fields are safe to // exchange: // -// - Unlike signed server entries, there's no independent trust anchor -// that can certify that the exchange data is valid. +// - Unlike signed server entries, there's no independent trust anchor +// that can certify that the exchange data is valid. // -// - While users should only perform the exchange with trusted peers, -// the user's trust in their peer may be misplaced. +// - While users should only perform the exchange with trusted peers, +// the user's trust in their peer may be misplaced. 
// // This presents the possibility of attack such as the peer sending dial // parameters that could be used to trace/monitor/flag the importer; or diff --git a/psiphon/meekConn.go b/psiphon/meekConn.go index ea0e694db..5cffdad92 100644 --- a/psiphon/meekConn.go +++ b/psiphon/meekConn.go @@ -159,6 +159,10 @@ type MeekConfig struct { // in effect. This value is used for stats reporting. TransformedHostName bool + // AddPsiphonFrontingHeader specifies whether to add the + // X-Psiphon-Fronting-Address custom header. + AddPsiphonFrontingHeader bool + // VerifyServerName specifies a domain name that must appear in the server // certificate. When blank, server certificate verification is disabled. VerifyServerName string @@ -614,24 +618,37 @@ func DialMeek( Opaque: opaqueURL, } - if meekConfig.UseHTTPS { - host, _, err := net.SplitHostPort(meekConfig.DialAddress) - if err != nil { - return nil, errors.Trace(err) - } - additionalHeaders = map[string][]string{ - "X-Psiphon-Fronting-Address": {host}, - } + if scheme == "http" && proxyUrl == nil { + + // Add custom headers to HTTP. This may be unproxied HTTP, or CONNECT + // method proxied HTTP, which is handled implicitly by DialTCP (in the + // latter case, the CONNECT request itself will also have custom + // headers via upstreamproxy applied by the dialer). + // + // When proxyUrl != nil, proxying is handled by http.Transport and + // custom headers are set in upstreamproxy.NewProxyAuthTransport, above. + + additionalHeaders = dialConfig.CustomHeaders + } else { - if proxyUrl == nil { - // Add custom headers to plain, unproxied HTTP and to CONNECT - // method proxied HTTP (in the latter case, the CONNECT request - // itself will also have custom headers via upstreamproxy applied - // by the dialer). + additionalHeaders = make(http.Header) + + // User-Agent is passed in via dialConfig.CustomHeaders. Always use + // any User-Agent header, even when not using all custom headers. + + userAgent := dialConfig.CustomHeaders.Get("User-Agent") + if userAgent != "" { + additionalHeaders.Set("User-Agent", userAgent) + } + } - additionalHeaders = dialConfig.CustomHeaders + if meekConfig.AddPsiphonFrontingHeader { + host, _, err := net.SplitHostPort(meekConfig.DialAddress) + if err != nil { + return nil, errors.Trace(err) } + additionalHeaders.Set("X-Psiphon-Fronting-Address", host) } meek.url = url From 7d0527df74c43fc673d6592618f403ed0a22a859 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Thu, 2 Feb 2023 14:38:02 -0500 Subject: [PATCH 10/44] Document Android network change detection limitation --- .../Android/PsiphonTunnel/PsiphonTunnel.java | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index d52e7144f..8ea97fcd2 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -1649,6 +1649,24 @@ public void onLost(Network network) { // If we are NOT in the VPN mode then monitor default active networks with the // Internet capability, including VPN, to ensure we won't trigger a reconnect in // case the VPN is up while the system switches the underlying network. + + // Limitation: for Psiphon Library apps running over Psiphon VPN, or other VPNs + // with a similar architecture, it may be better to trigger a reconnect when + // the underlying physical network changes. 
When the underlying network + // changes, Psiphon VPN will remain up and reconnect its own tunnel. For the + // Psiphon app, this monitoring will detect no change. However, the Psiphon + // app's tunnel may be lost, and, without network change detection, initiating + // a reconnect will be delayed. For example, if the Psiphon app's tunnel is + // using QUIC, the Psiphon VPN will tunnel that traffic over udpgw. When + // Psiphon VPN reconnects, the egress source address of that UDP flow will + // change -- getting either a different source IP if the Psiphon server + // changes, or a different source port even if the same server -- and the QUIC + // server will drop the packets. The Psiphon app will initiate a reconnect only + // after a SSH keep alive probes timeout or a QUIC timeout. + // + // TODO: Add a second ConnectivityManager/NetworkRequest instance to monitor + // for underlying physical network changes while any VPN remains up. + builder.removeCapability(NetworkCapabilities.NET_CAPABILITY_NOT_VPN); } From 4b966ff2935f349d522822792528f02db9da8d72 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Thu, 2 Feb 2023 15:07:01 -0500 Subject: [PATCH 11/44] Fix java.util.ConcurrentModificationException crash --- .../Android/PsiphonTunnel/PsiphonTunnel.java | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index 8ea97fcd2..815be0970 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -1403,6 +1403,22 @@ private static Collection getActiveNetworkDNSServerAddresses(Contex NetworkRequest networkRequest = networkRequestBuilder.build(); + // There is a potential race condition in which the following + // network callback may be invoked, by a worker thread, after + // unregisterNetworkCallback. Synchronized access to a local + // ArrayList copy avoids the + // java.util.ConcurrentModificationException crash we previously + // observed when getActiveNetworkDNSServers iterated over the + // same ArrayList object value that was modified by the + // callback. + // + // The late invocation of the callback still results in an empty + // list of DNS servers, but this behavior has been observed only + // in artificial conditions while rapidly starting and stopping + // PsiphonTunnel. 
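The fix that follows is a guarded-copy pattern: the callback writes only into a locally scoped, synchronized collection, and results are copied out once the wait completes. A Go rendering of the same pattern, for illustration only (the actual change is the Java code below):

```go
package raceguard

import (
	"sync"
	"time"
)

// collect illustrates the guarded-copy pattern: an asynchronous callback
// appends only to a locally scoped, mutex-protected slice, so a late
// invocation after the timeout cannot race with the caller reading results.
func collect(register func(callback func(addr string)), timeout time.Duration) []string {
	var mu sync.Mutex
	var once sync.Once
	var collected []string
	done := make(chan struct{})

	register(func(addr string) {
		mu.Lock()
		collected = append(collected, addr)
		mu.Unlock()
		once.Do(func() { close(done) })
	})

	select {
	case <-done:
	case <-time.After(timeout):
	}

	// Copy under the lock; the callback may still fire after this returns,
	// but it only ever touches the locally scoped slice above.
	mu.Lock()
	defer mu.Unlock()
	return append([]string(nil), collected...)
}
```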
+ + ArrayList callbackDnsAddresses = new ArrayList(); + final CountDownLatch countDownLatch = new CountDownLatch(1); try { ConnectivityManager.NetworkCallback networkCallback = @@ -1410,7 +1426,9 @@ private static Collection getActiveNetworkDNSServerAddresses(Contex @Override public void onLinkPropertiesChanged(Network network, LinkProperties linkProperties) { - dnsAddresses.addAll(linkProperties.getDnsServers()); + synchronized (callbackDnsAddresses) { + callbackDnsAddresses.addAll(linkProperties.getDnsServers()); + } countDownLatch.countDown(); } }; @@ -1423,6 +1441,10 @@ public void onLinkPropertiesChanged(Network network, } catch (InterruptedException e) { Thread.currentThread().interrupt(); } + + synchronized (callbackDnsAddresses) { + dnsAddresses.addAll(callbackDnsAddresses); + } } return dnsAddresses; From c02f144f50daf1d8a7bb21382537a16dcecfe14a Mon Sep 17 00:00:00 2001 From: Amir Khan Date: Thu, 2 Feb 2023 16:35:55 -0500 Subject: [PATCH 12/44] Updated NetworkID if an active VPN is detected in non-VPN mode --- .../Android/PsiphonTunnel/PsiphonTunnel.java | 34 ++++++++++++++++--- .../PsiphonTunnel/Network/NetworkID.h | 2 ++ .../PsiphonTunnel/Network/NetworkID.m | 14 ++++++++ .../Psiphon/PsiphonProviderNetwork.h | 5 ++- .../Psiphon/PsiphonProviderNetwork.m | 28 ++++++--------- .../PsiphonTunnel/PsiphonTunnel.m | 5 ++- psiphon/dialParameters.go | 3 ++ 7 files changed, 67 insertions(+), 24 deletions(-) diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index 67cc0eb77..e484c9d5b 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -429,7 +429,7 @@ public long hasNetworkConnectivity() { @Override public String getNetworkID() { - return PsiphonTunnel.getNetworkID(context); + return PsiphonTunnel.getNetworkID(context, mPsiphonTunnel.isVpnMode()); } @Override @@ -672,7 +672,7 @@ public long hasIPv6Route() { @Override public String getNetworkID() { - return PsiphonTunnel.getNetworkID(mHostService.getContext()); + return PsiphonTunnel.getNetworkID(mHostService.getContext(), mPsiphonTunnel.isVpnMode()); } } @@ -746,7 +746,7 @@ private static long hasIPv6Route(Context context, HostLogger logger) { return hasRoute ? 1 : 0; } - private static String getNetworkID(Context context) { + private static String getNetworkID(Context context, boolean isVpnMode) { // TODO: getActiveNetworkInfo is deprecated in API 29; once // getActiveNetworkInfo is no longer available, use @@ -761,6 +761,30 @@ private static String getNetworkID(Context context) { String networkID = "UNKNOWN"; ConnectivityManager connectivityManager = (ConnectivityManager)context.getSystemService(Context.CONNECTIVITY_SERVICE); + + if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) { + + if (!isVpnMode) { + + NetworkCapabilities capabilities = null; + try { + Network nw = connectivityManager.getActiveNetwork(); + capabilities = connectivityManager.getNetworkCapabilities(nw); + } catch (java.lang.Exception e) { + // May get exceptions due to missing permissions like android.permission.ACCESS_NETWORK_STATE. + + // Apps using the Psiphon Library and lacking android.permission.ACCESS_NETWORK_STATE will + // proceed and use tactics, but with "UNKNOWN" as the sole network ID. 
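+                    // In that case capabilities remains null, the VPN transport
+                    // check below is skipped, and the network ID is determined by
+                    // the getActiveNetworkInfo logic that follows.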
+ } + + if (capabilities != null && capabilities.hasTransport(NetworkCapabilities.TRANSPORT_VPN)) { + return "VPN"; + } + + } + + } + NetworkInfo activeNetworkInfo = null; try { activeNetworkInfo = connectivityManager.getActiveNetworkInfo(); @@ -1557,7 +1581,9 @@ private void setCurrentActiveNetworkAndProperties(Network network) { // mimics the type determination logic in // getNetworkID. NetworkCapabilities capabilities = connectivityManager.getNetworkCapabilities(network); - if (capabilities.hasTransport(NetworkCapabilities.TRANSPORT_CELLULAR)) { + if (capabilities.hasTransport(NetworkCapabilities.TRANSPORT_VPN)) { + networkType = "VPN"; + } else if (capabilities.hasTransport(NetworkCapabilities.TRANSPORT_CELLULAR)) { networkType = "MOBILE"; } else if (capabilities.hasTransport(NetworkCapabilities.TRANSPORT_WIFI)) { networkType = "WIFI"; diff --git a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Network/NetworkID.h b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Network/NetworkID.h index 56bd3152b..8a3cdcd55 100644 --- a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Network/NetworkID.h +++ b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Network/NetworkID.h @@ -33,9 +33,11 @@ NS_ASSUME_NONNULL_BEGIN /// currentNetworkStatus is NetworkReachabilityReachableViaWired. /// @param currentNetworkStatus Used to determine network ID and, on iOS <12, to determine the active interface when /// currentNetworkStatus is NetworkReachabilityReachableViaWired. +/// @param tunnelWholeDevice False if library is used in non-VPN mode, true otherwise. /// @param outWarn If non-nil, then a non-fatal error occurred while determining the network ID and a valid network ID will still be returned. + (NSString *)getNetworkIDWithReachability:(id)reachability andCurrentNetworkStatus:(NetworkReachability)currentNetworkStatus + tunnelWholeDevice:(BOOL)tunnelWholeDevice warning:(NSError *_Nullable *_Nonnull)outWarn; @end diff --git a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Network/NetworkID.m b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Network/NetworkID.m index 3f3a57ebb..d00177f95 100644 --- a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Network/NetworkID.m +++ b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Network/NetworkID.m @@ -28,10 +28,24 @@ @implementation NetworkID // See comment in header. + (NSString *)getNetworkIDWithReachability:(id)reachability andCurrentNetworkStatus:(NetworkReachability)currentNetworkStatus + tunnelWholeDevice:(BOOL)tunnelWholeDevice warning:(NSError *_Nullable *_Nonnull)outWarn { *outWarn = nil; + // NetworkID is "VPN" if the library is used in non-VPN mode, + // and an active VPN is found on the system. + // This method is not exact and relies on CFNetworkCopySystemProxySettings, + // specifically it may not return tun interfaces for some VPNs on macOS. 
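+    // The interface name checks below (tun/tap/ppp/ipsec) are heuristics: a VPN
+    // whose interface is not reported under the __SCOPED__ proxy settings, or is
+    // named differently, will not be detected, and the network ID is derived from
+    // reachability as usual.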
+ if (!tunnelWholeDevice) { + NSDictionary *_Nullable proxies = (__bridge NSDictionary *) CFNetworkCopySystemProxySettings(); + for (NSString *interface in [proxies[@"__SCOPED__"] allKeys]) { + if ([interface containsString:@"tun"] || [interface containsString:@"tap"] || [interface containsString:@"ppp"] || [interface containsString:@"ipsec"]) { + return @"VPN"; + } + } + } + NSMutableString *networkID = [NSMutableString stringWithString:@"UNKNOWN"]; if (currentNetworkStatus == NetworkReachabilityReachableViaWiFi) { [networkID setString:@"WIFI"]; diff --git a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Psiphon/PsiphonProviderNetwork.h b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Psiphon/PsiphonProviderNetwork.h index ba6e48cc9..21fcedd67 100644 --- a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Psiphon/PsiphonProviderNetwork.h +++ b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Psiphon/PsiphonProviderNetwork.h @@ -24,7 +24,10 @@ NS_ASSUME_NONNULL_BEGIN @interface PsiphonProviderNetwork : NSObject -- (instancetype)initWithLogger:(void (^__nonnull)(NSString *_Nonnull))logger; +- (instancetype)init NS_UNAVAILABLE; + +- (instancetype)initWithTunnelWholeDevice:(BOOL)tunnelWholeDevice + logger:(void (^__nonnull)(NSString *_Nonnull))logger NS_DESIGNATED_INITIALIZER; @end diff --git a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Psiphon/PsiphonProviderNetwork.m b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Psiphon/PsiphonProviderNetwork.m index 59e571157..4b0e7f902 100644 --- a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Psiphon/PsiphonProviderNetwork.m +++ b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/Psiphon/PsiphonProviderNetwork.m @@ -27,29 +27,20 @@ @implementation PsiphonProviderNetwork { id reachability; + BOOL tunnelWholeDevice; void (^logger) (NSString *_Nonnull); } -- (void)initialize { - if (@available(iOS 12.0, *)) { - self->reachability = [[DefaultRouteMonitor alloc] init]; - } else { - self->reachability = [Reachability reachabilityForInternetConnection]; - } -} - -- (id)init { - self = [super init]; - if (self) { - [self initialize]; - } - return self; -} - -- (instancetype)initWithLogger:(void (^__nonnull)(NSString *_Nonnull))logger { +- (instancetype)initWithTunnelWholeDevice:(BOOL)tunnelWholeDevice + logger:(void (^__nonnull)(NSString *_Nonnull))logger { self = [super init]; if (self) { - [self initialize]; + if (@available(iOS 12.0, *)) { + self->reachability = [[DefaultRouteMonitor alloc] init]; + } else { + self->reachability = [Reachability reachabilityForInternetConnection]; + } + self->tunnelWholeDevice = tunnelWholeDevice; self->logger = logger; } return self; @@ -74,6 +65,7 @@ - (NSString *)getNetworkID { NSError *warn; NSString *networkID = [NetworkID getNetworkIDWithReachability:self->reachability andCurrentNetworkStatus:self->reachability.reachabilityStatus + tunnelWholeDevice:self->tunnelWholeDevice warning:&warn]; if (warn != nil) { [self logMessage:[NSString stringWithFormat:@"error getting network ID: %@", warn.localizedDescription]]; diff --git a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m index 259a5da53..5e38cf89c 100644 --- a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m +++ b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m @@ -1294,6 +1294,7 @@ - (NSString *)getNetworkID { NSError *warn; NSString *networkID = [NetworkID getNetworkIDWithReachability:self->reachability andCurrentNetworkStatus:atomic_load(&self->currentNetworkStatus) + 
tunnelWholeDevice:self->tunnelWholeDevice warning:&warn]; if (warn != nil) { [self logMessage:[NSString stringWithFormat:@"error getting network ID: %@", warn.localizedDescription]]; @@ -1797,7 +1798,9 @@ - (void)startSendFeedback:(NSString * _Nonnull)feedbackJson PsiphonProviderNoticeHandlerShim *noticeHandler = [[PsiphonProviderNoticeHandlerShim alloc] initWithLogger:logNotice]; - PsiphonProviderNetwork *networkInfoProvider = [[PsiphonProviderNetwork alloc] initWithLogger:logger]; + PsiphonProviderNetwork *networkInfoProvider = [[PsiphonProviderNetwork alloc] + initWithTunnelWholeDevice:tunnelWholeDevice + logger:logger]; GoPsiStartSendFeedback( psiphonConfig, diff --git a/psiphon/dialParameters.go b/psiphon/dialParameters.go index 1a013c839..5c3fce47f 100644 --- a/psiphon/dialParameters.go +++ b/psiphon/dialParameters.go @@ -999,6 +999,9 @@ func (dialParams *DialParameters) GetNetworkType() string { // check for and use the common network type prefixes currently used in // NetworkIDGetter implementations. + if strings.HasPrefix(dialParams.NetworkID, "VPN") { + return "VPN" + } if strings.HasPrefix(dialParams.NetworkID, "WIFI") { return "WIFI" } From 419bc6bff17d39c5c82adfd53b587ed32634b8a2 Mon Sep 17 00:00:00 2001 From: Eugene Fryntov Date: Sun, 5 Feb 2023 11:29:02 -0500 Subject: [PATCH 13/44] Try and set network properties before tunnel starts. --- .../Android/PsiphonTunnel/PsiphonTunnel.java | 33 ++++++++----------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index 67cc0eb77..8d2a36456 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -147,6 +147,7 @@ default public void onExiting() {} private final NetworkMonitor mNetworkMonitor; private AtomicReference mActiveNetworkType; private AtomicReference mActiveNetworkDNSServers; + private final CountDownLatch mNetworkMonitorCountDownLatch = new CountDownLatch(1); // Only one PsiphonVpn instance may exist at a time, as the underlying // psi.Psi and tun2socks implementations each contain global state. 
@@ -209,7 +210,7 @@ public void onChanged() { mHostService.onDiagnosticMessage("reconnect error: " + e); } } - }, mActiveNetworkType, mActiveNetworkDNSServers, mHostService); + }); } public Object clone() throws CloneNotSupportedException { @@ -821,7 +822,9 @@ private void startPsiphon(String embeddedServerEntries) throws Exception { stopPsiphon(); mIsWaitingForNetworkConnectivity.set(false); mHostService.onDiagnosticMessage("starting Psiphon library"); + mNetworkMonitor.start(mHostService.getContext()); try { + mNetworkMonitorCountDownLatch.await(1, TimeUnit.SECONDS); Psi.start( loadPsiphonConfig(mHostService.getContext()), embeddedServerEntries, @@ -835,7 +838,6 @@ private void startPsiphon(String embeddedServerEntries) throws Exception { throw new Exception("failed to start Psiphon library", e); } - mNetworkMonitor.start(mHostService.getContext()); mHostService.onDiagnosticMessage("Psiphon library started"); } @@ -1483,25 +1485,15 @@ public Exception(String message, Throwable cause) { private static class NetworkMonitor { private final NetworkChangeListener listener; private ConnectivityManager.NetworkCallback networkCallback; - private AtomicReference activeNetworkType; - private AtomicReference activeNetworkDNSServers; - private HostLogger logger; - public NetworkMonitor( - NetworkChangeListener listener, - AtomicReference activeNetworkType, - AtomicReference activeNetworkDNSServers, - HostLogger logger) { - + NetworkChangeListener listener) { this.listener = listener; - this.activeNetworkType = activeNetworkType; - this.activeNetworkDNSServers = activeNetworkDNSServers; - this.logger = logger; } private void start(Context context) { // Need API 21(LOLLIPOP)+ for ConnectivityManager.NetworkCallback if (Build.VERSION.SDK_INT < Build.VERSION_CODES.LOLLIPOP) { + mPsiphonTunnel.mNetworkMonitorCountDownLatch.countDown(); return; } ConnectivityManager connectivityManager = @@ -1543,9 +1535,9 @@ private void setCurrentActiveNetworkAndProperties(Network network) { if (network == null) { - activeNetworkType.set("NONE"); - activeNetworkDNSServers.set(""); - logger.onDiagnosticMessage("NetworkMonitor: clear current active network"); + mPsiphonTunnel.mActiveNetworkType.set("NONE"); + mPsiphonTunnel.mActiveNetworkDNSServers.set(""); + mPsiphonTunnel.mHostService.onDiagnosticMessage("NetworkMonitor: clear current active network"); } else { @@ -1564,7 +1556,7 @@ private void setCurrentActiveNetworkAndProperties(Network network) { } } catch (java.lang.Exception e) { } - activeNetworkType.set(networkType); + mPsiphonTunnel.mActiveNetworkType.set(networkType); ArrayList servers = new ArrayList(); try { @@ -1580,15 +1572,16 @@ private void setCurrentActiveNetworkAndProperties(Network network) { } catch (java.lang.Exception e) { } // Use the workaround, comma-delimited format required for gobind. - activeNetworkDNSServers.set(TextUtils.join(",", servers)); + mPsiphonTunnel.mActiveNetworkDNSServers.set(TextUtils.join(",", servers)); String message = "NetworkMonitor: set current active network " + networkType; if (!servers.isEmpty()) { // The DNS server address is potential PII and not logged. message += " with DNS"; } - logger.onDiagnosticMessage(message); + mPsiphonTunnel.mHostService.onDiagnosticMessage(message); } + mPsiphonTunnel.mNetworkMonitorCountDownLatch.countDown(); } @Override From e1eef8a0cf1bb1cd9ea463b03fda80c3695a5b28 Mon Sep 17 00:00:00 2001 From: Eugene Fryntov Date: Mon, 6 Feb 2023 10:18:56 -0500 Subject: [PATCH 14/44] Small refactoring, add comments. 
--- .../Android/PsiphonTunnel/PsiphonTunnel.java | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index 8d2a36456..631775fe6 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -147,7 +147,6 @@ default public void onExiting() {} private final NetworkMonitor mNetworkMonitor; private AtomicReference mActiveNetworkType; private AtomicReference mActiveNetworkDNSServers; - private final CountDownLatch mNetworkMonitorCountDownLatch = new CountDownLatch(1); // Only one PsiphonVpn instance may exist at a time, as the underlying // psi.Psi and tun2socks implementations each contain global state. @@ -822,9 +821,10 @@ private void startPsiphon(String embeddedServerEntries) throws Exception { stopPsiphon(); mIsWaitingForNetworkConnectivity.set(false); mHostService.onDiagnosticMessage("starting Psiphon library"); - mNetworkMonitor.start(mHostService.getContext()); try { - mNetworkMonitorCountDownLatch.await(1, TimeUnit.SECONDS); + // mNetworkMonitor.start() will wait up to 1 second before returning to give the network + // callback a chance to populate active network properties before we start the tunnel. + mNetworkMonitor.start(mHostService.getContext()); Psi.start( loadPsiphonConfig(mHostService.getContext()), embeddedServerEntries, @@ -1490,10 +1490,11 @@ public NetworkMonitor( this.listener = listener; } - private void start(Context context) { + private void start(Context context) throws InterruptedException { + final CountDownLatch setNetworkPropertiesCountDownLatch = new CountDownLatch(1); + // Need API 21(LOLLIPOP)+ for ConnectivityManager.NetworkCallback if (Build.VERSION.SDK_INT < Build.VERSION_CODES.LOLLIPOP) { - mPsiphonTunnel.mNetworkMonitorCountDownLatch.countDown(); return; } ConnectivityManager connectivityManager = @@ -1581,7 +1582,7 @@ private void setCurrentActiveNetworkAndProperties(Network network) { } mPsiphonTunnel.mHostService.onDiagnosticMessage(message); } - mPsiphonTunnel.mNetworkMonitorCountDownLatch.countDown(); + setNetworkPropertiesCountDownLatch.countDown(); } @Override @@ -1639,11 +1640,20 @@ public void onLost(Network network) { } NetworkRequest networkRequest = builder.build(); + // We are using requestNetwork and not registerNetworkCallback here because we found + // that the callbacks from requestNetwork are more accurate in terms of tracking + // currently active network. Another alternative to use for tracking active network + // would be registerDefaultNetworkCallback but a) it needs API >= 24 and b) doesn't + // provide a way to set up monitoring of underlying networks only when VPN transport + // is also active. connectivityManager.requestNetwork(networkRequest, networkCallback); } catch (RuntimeException ignored) { // Could be a security exception or any other runtime exception on customized firmwares. networkCallback = null; } + // We are going to wait up to one second for the network callback to populate + // active network properties before returning. 
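+        // Note: await(long, TimeUnit) returns, with a boolean result that is
+        // ignored here, once the latch reaches zero or the timeout elapses, so
+        // start() blocks for at most about one second; an InterruptedException
+        // is propagated to the caller.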
+ setNetworkPropertiesCountDownLatch.await(1, TimeUnit.SECONDS); } private void stop(Context context) { From 1fd8f2db0dc5897f8002a9e1350f23409e01b255 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Mon, 6 Feb 2023 16:42:14 -0500 Subject: [PATCH 15/44] Meek server changes - Add a distinct timeout for fronted protocols - Don't terminate fronted persistent connections due to client errors - Add MeekRequiredHeaders --- psiphon/server/config.go | 9 +++++ psiphon/server/meek.go | 78 +++++++++++++++++++++++++++++++------ psiphon/server/meek_test.go | 31 ++++++++++++--- 3 files changed, 101 insertions(+), 17 deletions(-) diff --git a/psiphon/server/config.go b/psiphon/server/config.go index 2cec6b332..181b48842 100644 --- a/psiphon/server/config.go +++ b/psiphon/server/config.go @@ -228,6 +228,10 @@ type Config struct { // request fails. This is used to defend against abuse. MeekProhibitedHeaders []string + // MeekRequiredHeaders is a list of HTTP header names and values that must + // appear in requests. This is used to defend against abuse. + MeekRequiredHeaders map[string]string + // MeekProxyForwardedForHeaders is a list of HTTP headers which // may be added by downstream HTTP proxies or CDNs in front // of clients. These headers supply the original client IP @@ -266,6 +270,11 @@ type Config struct { // timeouts. The default is MEEK_DEFAULT_HTTP_CLIENT_IO_TIMEOUT. MeekHTTPClientIOTimeoutMilliseconds *int + // MeekFrontedHTTPClientIOTimeoutMilliseconds specifies meek HTTP server + // I/O timeouts for fronted protocols. The default is + // MEEK_DEFAULT_FRONTED_HTTP_CLIENT_IO_TIMEOUT. + MeekFrontedHTTPClientIOTimeoutMilliseconds *int + // MeekCachedResponseBufferSize is the size of a private, // fixed-size buffer allocated for every meek client. The buffer // is used to cache response payload, allowing the client to retry diff --git a/psiphon/server/meek.go b/psiphon/server/meek.go index 9bde75e64..ff91efac6 100644 --- a/psiphon/server/meek.go +++ b/psiphon/server/meek.go @@ -85,6 +85,7 @@ const ( MEEK_DEFAULT_SKIP_EXTENDED_TURN_AROUND_THRESHOLD = 8192 MEEK_DEFAULT_MAX_SESSION_STALENESS = 45 * time.Second MEEK_DEFAULT_HTTP_CLIENT_IO_TIMEOUT = 45 * time.Second + MEEK_DEFAULT_FRONTED_HTTP_CLIENT_IO_TIMEOUT = 360 * time.Second MEEK_DEFAULT_RESPONSE_BUFFER_LENGTH = 65536 MEEK_DEFAULT_POOL_BUFFER_LENGTH = 65536 MEEK_DEFAULT_POOL_BUFFER_COUNT = 2048 @@ -106,6 +107,7 @@ type MeekServer struct { listener net.Listener listenerTunnelProtocol string listenerPort int + isFronted bool passthroughAddress string turnAroundTimeout time.Duration extendedTurnAroundTimeout time.Duration @@ -162,10 +164,24 @@ func NewMeekServer( *support.Config.MeekMaxSessionStalenessMilliseconds) * time.Millisecond } - httpClientIOTimeout := MEEK_DEFAULT_HTTP_CLIENT_IO_TIMEOUT - if support.Config.MeekHTTPClientIOTimeoutMilliseconds != nil { - httpClientIOTimeout = time.Duration( - *support.Config.MeekHTTPClientIOTimeoutMilliseconds) * time.Millisecond + var httpClientIOTimeout time.Duration + if isFronted { + + // Fronted has a distinct timeout, and the default is higher since new + // clients may connect to a CDN edge and start using an existing + // persistent connection. 
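+		// With the defaults defined above, this selects a 360 second timeout
+		// for fronted listeners versus 45 seconds otherwise; either value can
+		// be overridden with the MeekFrontedHTTPClientIOTimeoutMilliseconds /
+		// MeekHTTPClientIOTimeoutMilliseconds config fields, as read below.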
+ + httpClientIOTimeout = MEEK_DEFAULT_FRONTED_HTTP_CLIENT_IO_TIMEOUT + if support.Config.MeekFrontedHTTPClientIOTimeoutMilliseconds != nil { + httpClientIOTimeout = time.Duration( + *support.Config.MeekFrontedHTTPClientIOTimeoutMilliseconds) * time.Millisecond + } + } else { + httpClientIOTimeout = MEEK_DEFAULT_HTTP_CLIENT_IO_TIMEOUT + if support.Config.MeekHTTPClientIOTimeoutMilliseconds != nil { + httpClientIOTimeout = time.Duration( + *support.Config.MeekHTTPClientIOTimeoutMilliseconds) * time.Millisecond + } } checksumTable := crc64.MakeTable(crc64.ECMA) @@ -195,6 +211,7 @@ func NewMeekServer( listener: listener, listenerTunnelProtocol: listenerTunnelProtocol, listenerPort: listenerPort, + isFronted: isFronted, passthroughAddress: passthroughAddress, turnAroundTimeout: turnAroundTimeout, extendedTurnAroundTimeout: extendedTurnAroundTimeout, @@ -321,6 +338,25 @@ func (server *MeekServer) ServeHTTP(responseWriter http.ResponseWriter, request // Note: no longer requiring that the request method is POST + // Check for required headers and values. For fronting, required headers + // may be used to identify a CDN edge. When this check fails, + // TerminateHTTPConnection is called instead of handleError, so any + // persistent connection is always closed. + + if len(server.support.Config.MeekRequiredHeaders) > 0 { + for header, value := range server.support.Config.MeekRequiredHeaders { + requestValue := request.Header.Get(header) + if requestValue != value { + log.WithTraceFields(LogFields{ + "header": header, + "value": requestValue, + }).Warning("invalid required meek header") + common.TerminateHTTPConnection(responseWriter, request) + return + } + } + } + // Check for the expected meek/session ID cookie. // Also check for prohibited HTTP headers. @@ -343,7 +379,7 @@ func (server *MeekServer) ServeHTTP(responseWriter http.ResponseWriter, request "header": header, "value": value, }).Warning("prohibited meek header") - common.TerminateHTTPConnection(responseWriter, request) + server.handleError(responseWriter, request) return } } @@ -370,7 +406,7 @@ func (server *MeekServer) ServeHTTP(responseWriter http.ResponseWriter, request // Debug since session cookie errors commonly occur during // normal operation. log.WithTraceFields(LogFields{"error": err}).Debug("session lookup failed") - common.TerminateHTTPConnection(responseWriter, request) + server.handleError(responseWriter, request) return } @@ -383,7 +419,7 @@ func (server *MeekServer) ServeHTTP(responseWriter http.ResponseWriter, request endPoint, common.GeoIPData(*endPointGeoIPData), responseWriter, request) if !handled { log.WithTraceFields(LogFields{"endPoint": endPoint}).Info("unhandled endpoint") - common.TerminateHTTPConnection(responseWriter, request) + server.handleError(responseWriter, request) } return } @@ -518,7 +554,7 @@ func (server *MeekServer) ServeHTTP(responseWriter http.ResponseWriter, request if !session.cachedResponse.HasPosition(position) { greaterThanSwapInt64(&session.metricCachedResponseMissPosition, int64(position)) - common.TerminateHTTPConnection(responseWriter, request) + server.handleError(responseWriter, request) session.delete(true) return } @@ -572,7 +608,7 @@ func (server *MeekServer) ServeHTTP(responseWriter http.ResponseWriter, request // also, golang network error messages may contain client IP. 
log.WithTraceFields(LogFields{"error": responseError}).Debug("write response failed") } - common.TerminateHTTPConnection(responseWriter, request) + server.handleError(responseWriter, request) // Note: keep session open to allow client to retry @@ -580,6 +616,20 @@ func (server *MeekServer) ServeHTTP(responseWriter http.ResponseWriter, request } } +func (server *MeekServer) handleError(responseWriter http.ResponseWriter, request *http.Request) { + + // When fronted, keep the persistent connection open since it may be used + // by many clients coming through the same edge. For performance reasons, + // an error, including invalid input, from one client shouldn't close the + // persistent connection used by other clients. + + if server.isFronted { + http.NotFound(responseWriter, request) + return + } + common.TerminateHTTPConnection(responseWriter, request) +} + func checkRangeHeader(request *http.Request) (int, bool) { rangeHeader := request.Header.Get("Range") if rangeHeader == "" { @@ -1729,9 +1779,13 @@ func (conn *meekConn) Close() error { if atomic.CompareAndSwapInt32(&conn.closed, 0, 1) { close(conn.closeBroadcast) - // In general, we reply on "net/http" to close underlying TCP conns. In this - // case, we can directly close the first once, if it's still open. - conn.firstUnderlyingConn.Close() + // In general, we reply on "net/http" to close underlying TCP conns. + // In this case, we can directly close the first once, if it's still + // open. Don't close a persistent connection when fronted, as it may + // be still be used by other clients. + if !conn.meekServer.isFronted { + conn.firstUnderlyingConn.Close() + } } return nil } diff --git a/psiphon/server/meek_test.go b/psiphon/server/meek_test.go index 72d79077c..bd5f3eed7 100755 --- a/psiphon/server/meek_test.go +++ b/psiphon/server/meek_test.go @@ -28,6 +28,7 @@ import ( "io/ioutil" "math/rand" "net" + "net/http" "path/filepath" "sync" "sync/atomic" @@ -407,17 +408,23 @@ func (interruptor *fileDescriptorInterruptor) BindToDevice(fileDescriptor int) ( return "", nil } +func TestMeekServer(t *testing.T) { + runTestMeekAccessControl(t, false, false, false) +} + func TestMeekRateLimiter(t *testing.T) { - runTestMeekAccessControl(t, true, false) - runTestMeekAccessControl(t, false, false) + runTestMeekAccessControl(t, true, false, false) } func TestMeekRestrictFrontingProviders(t *testing.T) { - runTestMeekAccessControl(t, false, true) - runTestMeekAccessControl(t, false, false) + runTestMeekAccessControl(t, false, true, false) } -func runTestMeekAccessControl(t *testing.T, rateLimit, restrictProvider bool) { +func TestMeekMissingRequiredHeaders(t *testing.T) { + runTestMeekAccessControl(t, false, false, true) +} + +func runTestMeekAccessControl(t *testing.T, rateLimit, restrictProvider, missingRequiredHeaders bool) { attempts := 10 @@ -431,6 +438,10 @@ func runTestMeekAccessControl(t *testing.T, rateLimit, restrictProvider bool) { allowedConnections = 0 } + if missingRequiredHeaders { + allowedConnections = 0 + } + // Configure tactics frontingProviderID := prng.HexString(8) @@ -492,10 +503,14 @@ func runTestMeekAccessControl(t *testing.T, rateLimit, restrictProvider bool) { meekRateLimiterTunnelProtocols = []string{protocol.TUNNEL_PROTOCOL_FRONTED_MEEK} } + requiredHeaderName := "X-Psiphon-Required-Header" + requiredHeaderValue := prng.Base64String(32) + mockSupport := &SupportServices{ Config: &Config{ MeekObfuscatedKey: meekObfuscatedKey, MeekCookieEncryptionPrivateKey: meekCookieEncryptionPrivateKey, + MeekRequiredHeaders: 
map[string]string{requiredHeaderName: requiredHeaderValue}, TunnelProtocolPorts: map[string]int{tunnelProtocol: 0}, frontingProviderID: frontingProviderID, }, @@ -590,6 +605,12 @@ func runTestMeekAccessControl(t *testing.T, rateLimit, restrictProvider bool) { }, } + if !missingRequiredHeaders { + headers := make(http.Header) + headers.Add(requiredHeaderName, requiredHeaderValue) + dialConfig.CustomHeaders = headers + } + params, err := parameters.NewParameters(nil) if err != nil { t.Fatalf("NewParameters failed: %s", err) From 89f5c66f2c7f39892a2b6866fb2e7540a269f6ee Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Wed, 15 Feb 2023 13:52:42 -0500 Subject: [PATCH 16/44] Use Go 1.19.6 --- .github/workflows/tests.yml | 2 +- ClientLibrary/Dockerfile | 2 +- ClientLibrary/build-darwin.sh | 4 ++-- ConsoleClient/Dockerfile | 2 +- MobileLibrary/Android/Dockerfile | 2 +- MobileLibrary/Android/make.bash | 4 ++-- MobileLibrary/iOS/build-psiphon-framework.sh | 2 +- Server/Dockerfile-binary-builder | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index cad2ea6ce..cb78aa55e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -15,7 +15,7 @@ jobs: fail-fast: false matrix: os: [ "ubuntu" ] - go: [ "1.19.5" ] + go: [ "1.19.6" ] test-type: [ "detector", "coverage", "memory" ] runs-on: ${{ matrix.os }}-latest diff --git a/ClientLibrary/Dockerfile b/ClientLibrary/Dockerfile index 16abfcf13..842441f93 100644 --- a/ClientLibrary/Dockerfile +++ b/ClientLibrary/Dockerfile @@ -21,7 +21,7 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \ # Install Go. # NOTE: Go 1.10+ is required to build c-shared for windows (https://github.com/golang/go/commit/bb0bfd002ada7e3eb9198d4287b32c2fed6e8da6) -ENV GOVERSION=go1.19.5 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 +ENV GOVERSION=go1.19.6 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 RUN curl -L https://storage.googleapis.com/golang/$GOVERSION.linux-amd64.tar.gz -o /tmp/go.tar.gz \ && tar -C /usr/local -xzf /tmp/go.tar.gz \ diff --git a/ClientLibrary/build-darwin.sh b/ClientLibrary/build-darwin.sh index 6e1bef92d..ba3aa7b18 100755 --- a/ClientLibrary/build-darwin.sh +++ b/ClientLibrary/build-darwin.sh @@ -9,8 +9,8 @@ if [ -z ${2+x} ]; then BUILD_TAGS=""; else BUILD_TAGS="$2"; fi # Note: # clangwrap.sh needs to be updated when the Go version changes. # The last version was: -# https://github.com/golang/go/blob/go1.19.5/misc/ios/clangwrap.sh -GO_VERSION_REQUIRED="1.19.5" +# https://github.com/golang/go/blob/go1.19.6/misc/ios/clangwrap.sh +GO_VERSION_REQUIRED="1.19.6" BASE_DIR=$(cd "$(dirname "$0")" ; pwd -P) cd ${BASE_DIR} diff --git a/ConsoleClient/Dockerfile b/ConsoleClient/Dockerfile index b319e14ee..99db7cc75 100644 --- a/ConsoleClient/Dockerfile +++ b/ConsoleClient/Dockerfile @@ -22,7 +22,7 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \ && rm -rf /var/lib/apt/lists/* # Install Go. 
-ENV GOVERSION=go1.19.5 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 +ENV GOVERSION=go1.19.6 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 RUN curl -L https://storage.googleapis.com/golang/$GOVERSION.linux-amd64.tar.gz -o /tmp/go.tar.gz \ && tar -C /usr/local -xzf /tmp/go.tar.gz \ diff --git a/MobileLibrary/Android/Dockerfile b/MobileLibrary/Android/Dockerfile index 574e61b05..81338b310 100644 --- a/MobileLibrary/Android/Dockerfile +++ b/MobileLibrary/Android/Dockerfile @@ -22,7 +22,7 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \ && rm -rf /var/lib/apt/lists/* # Install Go. -ENV GOVERSION=go1.19.5 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 +ENV GOVERSION=go1.19.6 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 RUN curl -L https://storage.googleapis.com/golang/$GOVERSION.linux-amd64.tar.gz -o /tmp/go.tar.gz \ && tar -C /usr/local -xzf /tmp/go.tar.gz \ diff --git a/MobileLibrary/Android/make.bash b/MobileLibrary/Android/make.bash index 16ae05f48..a755ee09d 100755 --- a/MobileLibrary/Android/make.bash +++ b/MobileLibrary/Android/make.bash @@ -17,8 +17,8 @@ export GOCACHE=/tmp BUILDINFOFILE="psiphon-tunnel-core_buildinfo.txt" BUILDDATE=$(date --iso-8601=seconds) -BUILDREPO=$(git config --get remote.origin.url) -BUILDREV=$(git rev-parse --short HEAD) +BUILDREPO="https://github.com/Psiphon-Labs/psiphon-tunnel-core.git" +BUILDREV="5641695f" GOVERSION=$(go version | perl -ne '/go version (.*?) / && print $1') LDFLAGS="\ diff --git a/MobileLibrary/iOS/build-psiphon-framework.sh b/MobileLibrary/iOS/build-psiphon-framework.sh index 5f0106b6c..406bd8cf5 100755 --- a/MobileLibrary/iOS/build-psiphon-framework.sh +++ b/MobileLibrary/iOS/build-psiphon-framework.sh @@ -17,7 +17,7 @@ set -e -u -x if [ -z ${1+x} ]; then BUILD_TAGS=""; else BUILD_TAGS="$1"; fi # Modify this value as we use newer Go versions. -GO_VERSION_REQUIRED="1.19.5" +GO_VERSION_REQUIRED="1.19.6" # At this time, psiphon-tunnel-core doesn't support modules export GO111MODULE=off diff --git a/Server/Dockerfile-binary-builder b/Server/Dockerfile-binary-builder index 325e1c854..7c2472b78 100644 --- a/Server/Dockerfile-binary-builder +++ b/Server/Dockerfile-binary-builder @@ -1,6 +1,6 @@ FROM alpine:3.10.2 -ENV GOLANG_VERSION 1.19.5 +ENV GOLANG_VERSION 1.19.6 ENV GOLANG_SRC_URL https://golang.org/dl/go$GOLANG_VERSION.src.tar.gz RUN set -ex \ From d3ae222a54208aaffe111c0b0733232e33bda19e Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Wed, 15 Feb 2023 13:52:57 -0500 Subject: [PATCH 17/44] Fix comment typo --- psiphon/server/meek.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psiphon/server/meek.go b/psiphon/server/meek.go index ff91efac6..9b0675341 100644 --- a/psiphon/server/meek.go +++ b/psiphon/server/meek.go @@ -1779,8 +1779,8 @@ func (conn *meekConn) Close() error { if atomic.CompareAndSwapInt32(&conn.closed, 0, 1) { close(conn.closeBroadcast) - // In general, we reply on "net/http" to close underlying TCP conns. - // In this case, we can directly close the first once, if it's still + // In general, we rely on "net/http" to close underlying TCP conns. In + // this case, we can directly close the first once, if it's still // open. Don't close a persistent connection when fronted, as it may // be still be used by other clients. 
if !conn.meekServer.isFronted { From 4c0a1cba5b8bdc947d302f8bdf90ddb7fef4de57 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Wed, 15 Feb 2023 14:02:03 -0500 Subject: [PATCH 18/44] Update x/net (for x/net/http2) --- go.mod | 8 +- go.sum | 8 + vendor/golang.org/x/net/http2/flow.go | 88 +- vendor/golang.org/x/net/http2/frame.go | 11 +- vendor/golang.org/x/net/http2/headermap.go | 18 + vendor/golang.org/x/net/http2/hpack/encode.go | 5 + vendor/golang.org/x/net/http2/hpack/hpack.go | 81 +- .../x/net/http2/hpack/static_table.go | 188 ++++ vendor/golang.org/x/net/http2/hpack/tables.go | 78 +- vendor/golang.org/x/net/http2/server.go | 335 +++++--- vendor/golang.org/x/net/http2/transport.go | 201 +++-- .../net/internal/socket/zsys_openbsd_ppc64.go | 30 + .../internal/socket/zsys_openbsd_riscv64.go | 30 + vendor/golang.org/x/net/ipv6/dgramopt.go | 2 +- vendor/golang.org/x/net/trace/histogram.go | 2 +- vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 1 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 44 +- vendor/golang.org/x/sys/cpu/endian_big.go | 11 + vendor/golang.org/x/sys/cpu/endian_little.go | 11 + vendor/golang.org/x/sys/cpu/parse.go | 43 + .../x/sys/cpu/proc_cpuinfo_linux.go | 54 ++ .../golang.org/x/sys/execabs/execabs_go119.go | 8 +- vendor/golang.org/x/sys/unix/gccgo.go | 4 +- vendor/golang.org/x/sys/unix/gccgo_c.c | 4 +- vendor/golang.org/x/sys/unix/ioctl.go | 4 +- vendor/golang.org/x/sys/unix/mkall.sh | 4 +- .../golang.org/x/sys/unix/syscall_darwin.go | 1 + .../x/sys/unix/syscall_dragonfly.go | 1 + .../golang.org/x/sys/unix/syscall_freebsd.go | 1 + .../x/sys/unix/syscall_freebsd_386.go | 9 +- .../x/sys/unix/syscall_freebsd_amd64.go | 9 +- .../x/sys/unix/syscall_freebsd_arm.go | 9 +- .../x/sys/unix/syscall_freebsd_arm64.go | 9 +- .../x/sys/unix/syscall_freebsd_riscv64.go | 9 +- vendor/golang.org/x/sys/unix/syscall_hurd.go | 22 + .../golang.org/x/sys/unix/syscall_hurd_386.go | 29 + vendor/golang.org/x/sys/unix/syscall_linux.go | 51 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 15 + .../golang.org/x/sys/unix/syscall_openbsd.go | 1 + .../x/sys/unix/syscall_openbsd_libc.go | 4 +- .../golang.org/x/sys/unix/syscall_solaris.go | 1 + vendor/golang.org/x/sys/unix/syscall_unix.go | 57 +- vendor/golang.org/x/sys/unix/timestruct.go | 2 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 9 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 30 +- .../x/sys/unix/zerrors_linux_386.go | 1 + .../x/sys/unix/zerrors_linux_amd64.go | 1 + .../x/sys/unix/zerrors_linux_arm.go | 1 + .../x/sys/unix/zerrors_linux_arm64.go | 1 + .../x/sys/unix/zerrors_linux_loong64.go | 1 + .../x/sys/unix/zerrors_linux_mips.go | 1 + .../x/sys/unix/zerrors_linux_mips64.go | 1 + .../x/sys/unix/zerrors_linux_mips64le.go | 1 + .../x/sys/unix/zerrors_linux_mipsle.go | 1 + .../x/sys/unix/zerrors_linux_ppc.go | 1 + .../x/sys/unix/zerrors_linux_ppc64.go | 1 + .../x/sys/unix/zerrors_linux_ppc64le.go | 1 + .../x/sys/unix/zerrors_linux_riscv64.go | 1 + .../x/sys/unix/zerrors_linux_s390x.go | 1 + .../x/sys/unix/zerrors_linux_sparc64.go | 1 + .../x/sys/unix/zerrors_openbsd_386.go | 356 ++++++-- .../x/sys/unix/zerrors_openbsd_amd64.go | 189 +++- .../x/sys/unix/zerrors_openbsd_arm.go | 348 ++++++-- .../x/sys/unix/zerrors_openbsd_arm64.go | 160 +++- .../x/sys/unix/zerrors_openbsd_mips64.go | 95 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 10 + .../x/sys/unix/zsyscall_freebsd_386.go | 10 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 10 + .../x/sys/unix/zsyscall_freebsd_arm.go | 10 + .../x/sys/unix/zsyscall_freebsd_arm64.go | 10 + 
.../x/sys/unix/zsyscall_freebsd_riscv64.go | 10 + .../golang.org/x/sys/unix/zsyscall_linux.go | 11 + .../x/sys/unix/zsyscall_netbsd_386.go | 10 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 10 + .../x/sys/unix/zsyscall_netbsd_arm.go | 10 + .../x/sys/unix/zsyscall_netbsd_arm64.go | 10 + .../x/sys/unix/zsyscall_openbsd_386.go | 14 + .../x/sys/unix/zsyscall_openbsd_386.s | 137 +-- .../x/sys/unix/zsyscall_openbsd_amd64.go | 14 + .../x/sys/unix/zsyscall_openbsd_amd64.s | 137 +-- .../x/sys/unix/zsyscall_openbsd_arm.go | 14 + .../x/sys/unix/zsyscall_openbsd_arm.s | 137 +-- .../x/sys/unix/zsyscall_openbsd_arm64.go | 14 + .../x/sys/unix/zsyscall_openbsd_arm64.s | 137 +-- .../x/sys/unix/zsyscall_openbsd_mips64.go | 812 +++++++++++++++--- .../x/sys/unix/zsyscall_openbsd_mips64.s | 669 +++++++++++++++ .../x/sys/unix/zsyscall_openbsd_ppc64.go | 14 + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 14 + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 137 +-- .../x/sys/unix/zsyscall_solaris_amd64.go | 13 + .../x/sys/unix/zsysctl_openbsd_386.go | 51 +- .../x/sys/unix/zsysctl_openbsd_amd64.go | 17 +- .../x/sys/unix/zsysctl_openbsd_arm.go | 51 +- .../x/sys/unix/zsysctl_openbsd_arm64.go | 11 +- .../x/sys/unix/zsysctl_openbsd_mips64.go | 3 +- .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 217 ++++- .../x/sys/unix/ztypes_netbsd_386.go | 84 ++ .../x/sys/unix/ztypes_netbsd_amd64.go | 84 ++ .../x/sys/unix/ztypes_netbsd_arm.go | 84 ++ .../x/sys/unix/ztypes_netbsd_arm64.go | 84 ++ .../x/sys/unix/ztypes_openbsd_386.go | 97 +-- .../x/sys/unix/ztypes_openbsd_amd64.go | 33 +- .../x/sys/unix/ztypes_openbsd_arm.go | 9 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 9 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 9 +- .../x/sys/windows/syscall_windows.go | 15 +- .../x/sys/windows/zsyscall_windows.go | 7 + vendor/golang.org/x/term/AUTHORS | 3 - vendor/golang.org/x/term/CONTRIBUTORS | 3 - vendor/golang.org/x/term/term.go | 10 +- vendor/golang.org/x/term/terminal.go | 3 +- vendor/golang.org/x/text/AUTHORS | 3 - vendor/golang.org/x/text/CONTRIBUTORS | 3 - vendor/golang.org/x/text/unicode/bidi/core.go | 26 +- .../golang.org/x/text/unicode/bidi/trieval.go | 12 - .../x/text/unicode/norm/forminfo.go | 9 +- .../x/text/unicode/norm/normalize.go | 11 +- .../x/text/unicode/norm/tables13.0.0.go | 4 +- vendor/modules.txt | 8 +- 121 files changed, 4439 insertions(+), 1607 deletions(-) create mode 100644 vendor/golang.org/x/net/http2/hpack/static_table.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go create mode 100644 vendor/golang.org/x/sys/cpu/parse.go create mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s delete mode 100644 vendor/golang.org/x/term/AUTHORS delete mode 100644 vendor/golang.org/x/term/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/text/AUTHORS delete mode 100644 vendor/golang.org/x/text/CONTRIBUTORS diff --git a/go.mod b/go.mod index 8b211c6b7..b48d29510 100644 --- a/go.mod +++ b/go.mod @@ -49,10 +49,10 @@ require ( github.com/wader/filtertransport v0.0.0-20200316221534-bdd9e61eee78 
github.com/zach-klippenstein/goregen v0.0.0-20160303162051-795b5e3961ea golang.org/x/crypto v0.0.0-20221012134737-56aed061732a - golang.org/x/net v0.0.0-20221014081412-f15817d10f9b + golang.org/x/net v0.7.0 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 - golang.org/x/sys v0.2.0 - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 + golang.org/x/sys v0.5.0 + golang.org/x/term v0.5.0 ) require ( @@ -84,7 +84,7 @@ require ( gitlab.com/yawning/obfs4.git v0.0.0-20190120164510-816cff15f425 // indirect golang.org/x/exp v0.0.0-20221012211006-4de253d81b95 // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/text v0.7.0 // indirect golang.org/x/tools v0.1.12 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 44368fee0..5b3469354 100644 --- a/go.sum +++ b/go.sum @@ -257,6 +257,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgk golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b h1:tvrvnPFcdzp294diPnrdZZZ8XUt2Tyj7svb7X52iDuU= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -303,15 +305,21 @@ golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43 h1:OK7RB6t2WQX54srQQYSXMW8dF golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index b51f0e0cf..b7dbd1869 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -6,23 +6,91 @@ package http2 -// flow is the flow control window's size. -type flow struct { +// inflowMinRefresh is the minimum number of bytes we'll send for a +// flow control window update. +const inflowMinRefresh = 4 << 10 + +// inflow accounts for an inbound flow control window. +// It tracks both the latest window sent to the peer (used for enforcement) +// and the accumulated unsent window. +type inflow struct { + avail int32 + unsent int32 +} + +// init sets the initial window. +func (f *inflow) init(n int32) { + f.avail = n +} + +// add adds n bytes to the window, with a maximum window size of max, +// indicating that the peer can now send us more data. +// For example, the user read from a {Request,Response} body and consumed +// some of the buffered data, so the peer can now send more. +// It returns the number of bytes to send in a WINDOW_UPDATE frame to the peer. +// Window updates are accumulated and sent when the unsent capacity +// is at least inflowMinRefresh or will at least double the peer's available window. +func (f *inflow) add(n int) (connAdd int32) { + if n < 0 { + panic("negative update") + } + unsent := int64(f.unsent) + int64(n) + // "A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets." + // RFC 7540 Section 6.9.1. + const maxWindow = 1<<31 - 1 + if unsent+int64(f.avail) > maxWindow { + panic("flow control update exceeds maximum window size") + } + f.unsent = int32(unsent) + if f.unsent < inflowMinRefresh && f.unsent < f.avail { + // If there aren't at least inflowMinRefresh bytes of window to send, + // and this update won't at least double the window, buffer the update for later. + return 0 + } + f.avail += f.unsent + f.unsent = 0 + return int32(unsent) +} + +// take attempts to take n bytes from the peer's flow control window. +// It reports whether the window has available capacity. +func (f *inflow) take(n uint32) bool { + if n > uint32(f.avail) { + return false + } + f.avail -= int32(n) + return true +} + +// takeInflows attempts to take n bytes from two inflows, +// typically connection-level and stream-level flows. +// It reports whether both windows have available capacity. +func takeInflows(f1, f2 *inflow, n uint32) bool { + if n > uint32(f1.avail) || n > uint32(f2.avail) { + return false + } + f1.avail -= int32(n) + f2.avail -= int32(n) + return true +} + +// outflow is the outbound flow control window's size. +type outflow struct { _ incomparable // n is the number of DATA bytes we're allowed to send. - // A flow is kept both on a conn and a per-stream. + // An outflow is kept both on a conn and a per-stream. n int32 - // conn points to the shared connection-level flow that is - // shared by all streams on that conn. It is nil for the flow + // conn points to the shared connection-level outflow that is + // shared by all streams on that conn. It is nil for the outflow // that's on the conn directly. 
- conn *flow + conn *outflow } -func (f *flow) setConnFlow(cf *flow) { f.conn = cf } +func (f *outflow) setConnFlow(cf *outflow) { f.conn = cf } -func (f *flow) available() int32 { +func (f *outflow) available() int32 { n := f.n if f.conn != nil && f.conn.n < n { n = f.conn.n @@ -30,7 +98,7 @@ func (f *flow) available() int32 { return n } -func (f *flow) take(n int32) { +func (f *outflow) take(n int32) { if n > f.available() { panic("internal error: took too much") } @@ -42,7 +110,7 @@ func (f *flow) take(n int32) { // add adds n bytes (positive or negative) to the flow control window. // It returns false if the sum would exceed 2^31-1. -func (f *flow) add(n int32) bool { +func (f *outflow) add(n int32) bool { sum := f.n + n if (sum > n) == (f.n > 0) { f.n = sum diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 184ac45fe..c1f6b90dc 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -662,6 +662,15 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { // It is the caller's responsibility not to violate the maximum frame size // and to not call other Write methods concurrently. func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { + if err := f.startWriteDataPadded(streamID, endStream, data, pad); err != nil { + return err + } + return f.endWrite() +} + +// startWriteDataPadded is WriteDataPadded, but only writes the frame to the Framer's internal buffer. +// The caller should call endWrite to flush the frame to the underlying writer. +func (f *Framer) startWriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } @@ -691,7 +700,7 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by } f.wbuf = append(f.wbuf, data...) f.wbuf = append(f.wbuf, pad...) 
- return f.endWrite() + return nil } // A SettingsFrame conveys configuration parameters that affect how diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go index 9e12941da..149b3dd20 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -27,7 +27,14 @@ func buildCommonHeaderMaps() { "accept-language", "accept-ranges", "age", + "access-control-allow-credentials", + "access-control-allow-headers", + "access-control-allow-methods", "access-control-allow-origin", + "access-control-expose-headers", + "access-control-max-age", + "access-control-request-headers", + "access-control-request-method", "allow", "authorization", "cache-control", @@ -53,6 +60,7 @@ func buildCommonHeaderMaps() { "link", "location", "max-forwards", + "origin", "proxy-authenticate", "proxy-authorization", "range", @@ -68,6 +76,8 @@ func buildCommonHeaderMaps() { "vary", "via", "www-authenticate", + "x-forwarded-for", + "x-forwarded-proto", } commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) @@ -85,3 +95,11 @@ func lowerHeader(v string) (lower string, ascii bool) { } return asciiToLower(v) } + +func canonicalHeader(v string) string { + buildCommonHeaderMapsOnce() + if s, ok := commonCanonHeader[v]; ok { + return s + } + return http.CanonicalHeaderKey(v) +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index 6886dc163..46219da2b 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -116,6 +116,11 @@ func (e *Encoder) SetMaxDynamicTableSize(v uint32) { e.dynTab.setMaxSize(v) } +// MaxDynamicTableSize returns the current dynamic header table size. +func (e *Encoder) MaxDynamicTableSize() (v uint32) { + return e.dynTab.maxSize +} + // SetMaxDynamicTableSizeLimit changes the maximum value that can be // specified in SetMaxDynamicTableSize to v. By default, it is set to // 4096, which is the same size of the default dynamic header table diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index ebdfbee96..7a1d97669 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -211,7 +211,7 @@ func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { return dt.ents[dt.len()-(int(i)-staticTable.len())], true } -// Decode decodes an entire block. +// DecodeFull decodes an entire block. // // TODO: remove this method and make it incremental later? This is // easier for debugging now. 
@@ -359,6 +359,7 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { var hf HeaderField wantStr := d.emitEnabled || it.indexed() + var undecodedName undecodedString if nameIdx > 0 { ihf, ok := d.at(nameIdx) if !ok { @@ -366,15 +367,27 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { } hf.Name = ihf.Name } else { - hf.Name, buf, err = d.readString(buf, wantStr) + undecodedName, buf, err = d.readString(buf) if err != nil { return err } } - hf.Value, buf, err = d.readString(buf, wantStr) + undecodedValue, buf, err := d.readString(buf) if err != nil { return err } + if wantStr { + if nameIdx <= 0 { + hf.Name, err = d.decodeString(undecodedName) + if err != nil { + return err + } + } + hf.Value, err = d.decodeString(undecodedValue) + if err != nil { + return err + } + } d.buf = buf if it.indexed() { d.dynTab.add(hf) @@ -459,46 +472,52 @@ func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { return 0, origP, errNeedMore } -// readString decodes an hpack string from p. +// readString reads an hpack string from p. // -// wantStr is whether s will be used. If false, decompression and -// []byte->string garbage are skipped if s will be ignored -// anyway. This does mean that huffman decoding errors for non-indexed -// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server -// is returning an error anyway, and because they're not indexed, the error -// won't affect the decoding state. -func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { +// It returns a reference to the encoded string data to permit deferring decode costs +// until after the caller verifies all data is present. +func (d *Decoder) readString(p []byte) (u undecodedString, remain []byte, err error) { if len(p) == 0 { - return "", p, errNeedMore + return u, p, errNeedMore } isHuff := p[0]&128 != 0 strLen, p, err := readVarInt(7, p) if err != nil { - return "", p, err + return u, p, err } if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { - return "", nil, ErrStringLength + // Returning an error here means Huffman decoding errors + // for non-indexed strings past the maximum string length + // are ignored, but the server is returning an error anyway + // and because the string is not indexed the error will not + // affect the decoding state. 
+ return u, nil, ErrStringLength } if uint64(len(p)) < strLen { - return "", p, errNeedMore - } - if !isHuff { - if wantStr { - s = string(p[:strLen]) - } - return s, p[strLen:], nil + return u, p, errNeedMore } + u.isHuff = isHuff + u.b = p[:strLen] + return u, p[strLen:], nil +} - if wantStr { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() // don't trust others - defer bufPool.Put(buf) - if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { - buf.Reset() - return "", nil, err - } +type undecodedString struct { + isHuff bool + b []byte +} + +func (d *Decoder) decodeString(u undecodedString) (string, error) { + if !u.isHuff { + return string(u.b), nil + } + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() // don't trust others + var s string + err := huffmanDecode(buf, d.maxStrLen, u.b) + if err == nil { s = buf.String() - buf.Reset() // be nice to GC } - return s, p[strLen:], nil + buf.Reset() // be nice to GC + bufPool.Put(buf) + return s, err } diff --git a/vendor/golang.org/x/net/http2/hpack/static_table.go b/vendor/golang.org/x/net/http2/hpack/static_table.go new file mode 100644 index 000000000..754a1eb91 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/static_table.go @@ -0,0 +1,188 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. + +package hpack + +var staticTable = &headerFieldTable{ + evictCount: 0, + byName: map[string]uint64{ + ":authority": 1, + ":method": 3, + ":path": 5, + ":scheme": 7, + ":status": 14, + "accept-charset": 15, + "accept-encoding": 16, + "accept-language": 17, + "accept-ranges": 18, + "accept": 19, + "access-control-allow-origin": 20, + "age": 21, + "allow": 22, + "authorization": 23, + "cache-control": 24, + "content-disposition": 25, + "content-encoding": 26, + "content-language": 27, + "content-length": 28, + "content-location": 29, + "content-range": 30, + "content-type": 31, + "cookie": 32, + "date": 33, + "etag": 34, + "expect": 35, + "expires": 36, + "from": 37, + "host": 38, + "if-match": 39, + "if-modified-since": 40, + "if-none-match": 41, + "if-range": 42, + "if-unmodified-since": 43, + "last-modified": 44, + "link": 45, + "location": 46, + "max-forwards": 47, + "proxy-authenticate": 48, + "proxy-authorization": 49, + "range": 50, + "referer": 51, + "refresh": 52, + "retry-after": 53, + "server": 54, + "set-cookie": 55, + "strict-transport-security": 56, + "transfer-encoding": 57, + "user-agent": 58, + "vary": 59, + "via": 60, + "www-authenticate": 61, + }, + byNameValue: map[pairNameValue]uint64{ + {name: ":authority", value: ""}: 1, + {name: ":method", value: "GET"}: 2, + {name: ":method", value: "POST"}: 3, + {name: ":path", value: "/"}: 4, + {name: ":path", value: "/index.html"}: 5, + {name: ":scheme", value: "http"}: 6, + {name: ":scheme", value: "https"}: 7, + {name: ":status", value: "200"}: 8, + {name: ":status", value: "204"}: 9, + {name: ":status", value: "206"}: 10, + {name: ":status", value: "304"}: 11, + {name: ":status", value: "400"}: 12, + {name: ":status", value: "404"}: 13, + {name: ":status", value: "500"}: 14, + {name: "accept-charset", value: ""}: 15, + {name: "accept-encoding", value: "gzip, deflate"}: 16, + {name: "accept-language", value: ""}: 17, + {name: "accept-ranges", value: ""}: 18, + {name: "accept", value: ""}: 19, + {name: "access-control-allow-origin", value: ""}: 20, + {name: "age", value: ""}: 21, + {name: "allow", value: ""}: 22, + {name: "authorization", value: ""}: 23, + {name: "cache-control", value: ""}: 24, + {name: "content-disposition", value: 
""}: 25, + {name: "content-encoding", value: ""}: 26, + {name: "content-language", value: ""}: 27, + {name: "content-length", value: ""}: 28, + {name: "content-location", value: ""}: 29, + {name: "content-range", value: ""}: 30, + {name: "content-type", value: ""}: 31, + {name: "cookie", value: ""}: 32, + {name: "date", value: ""}: 33, + {name: "etag", value: ""}: 34, + {name: "expect", value: ""}: 35, + {name: "expires", value: ""}: 36, + {name: "from", value: ""}: 37, + {name: "host", value: ""}: 38, + {name: "if-match", value: ""}: 39, + {name: "if-modified-since", value: ""}: 40, + {name: "if-none-match", value: ""}: 41, + {name: "if-range", value: ""}: 42, + {name: "if-unmodified-since", value: ""}: 43, + {name: "last-modified", value: ""}: 44, + {name: "link", value: ""}: 45, + {name: "location", value: ""}: 46, + {name: "max-forwards", value: ""}: 47, + {name: "proxy-authenticate", value: ""}: 48, + {name: "proxy-authorization", value: ""}: 49, + {name: "range", value: ""}: 50, + {name: "referer", value: ""}: 51, + {name: "refresh", value: ""}: 52, + {name: "retry-after", value: ""}: 53, + {name: "server", value: ""}: 54, + {name: "set-cookie", value: ""}: 55, + {name: "strict-transport-security", value: ""}: 56, + {name: "transfer-encoding", value: ""}: 57, + {name: "user-agent", value: ""}: 58, + {name: "vary", value: ""}: 59, + {name: "via", value: ""}: 60, + {name: "www-authenticate", value: ""}: 61, + }, + ents: []HeaderField{ + {Name: ":authority", Value: "", Sensitive: false}, + {Name: ":method", Value: "GET", Sensitive: false}, + {Name: ":method", Value: "POST", Sensitive: false}, + {Name: ":path", Value: "/", Sensitive: false}, + {Name: ":path", Value: "/index.html", Sensitive: false}, + {Name: ":scheme", Value: "http", Sensitive: false}, + {Name: ":scheme", Value: "https", Sensitive: false}, + {Name: ":status", Value: "200", Sensitive: false}, + {Name: ":status", Value: "204", Sensitive: false}, + {Name: ":status", Value: "206", Sensitive: false}, + {Name: ":status", Value: "304", Sensitive: false}, + {Name: ":status", Value: "400", Sensitive: false}, + {Name: ":status", Value: "404", Sensitive: false}, + {Name: ":status", Value: "500", Sensitive: false}, + {Name: "accept-charset", Value: "", Sensitive: false}, + {Name: "accept-encoding", Value: "gzip, deflate", Sensitive: false}, + {Name: "accept-language", Value: "", Sensitive: false}, + {Name: "accept-ranges", Value: "", Sensitive: false}, + {Name: "accept", Value: "", Sensitive: false}, + {Name: "access-control-allow-origin", Value: "", Sensitive: false}, + {Name: "age", Value: "", Sensitive: false}, + {Name: "allow", Value: "", Sensitive: false}, + {Name: "authorization", Value: "", Sensitive: false}, + {Name: "cache-control", Value: "", Sensitive: false}, + {Name: "content-disposition", Value: "", Sensitive: false}, + {Name: "content-encoding", Value: "", Sensitive: false}, + {Name: "content-language", Value: "", Sensitive: false}, + {Name: "content-length", Value: "", Sensitive: false}, + {Name: "content-location", Value: "", Sensitive: false}, + {Name: "content-range", Value: "", Sensitive: false}, + {Name: "content-type", Value: "", Sensitive: false}, + {Name: "cookie", Value: "", Sensitive: false}, + {Name: "date", Value: "", Sensitive: false}, + {Name: "etag", Value: "", Sensitive: false}, + {Name: "expect", Value: "", Sensitive: false}, + {Name: "expires", Value: "", Sensitive: false}, + {Name: "from", Value: "", Sensitive: false}, + {Name: "host", Value: "", Sensitive: false}, + {Name: "if-match", Value: "", 
Sensitive: false}, + {Name: "if-modified-since", Value: "", Sensitive: false}, + {Name: "if-none-match", Value: "", Sensitive: false}, + {Name: "if-range", Value: "", Sensitive: false}, + {Name: "if-unmodified-since", Value: "", Sensitive: false}, + {Name: "last-modified", Value: "", Sensitive: false}, + {Name: "link", Value: "", Sensitive: false}, + {Name: "location", Value: "", Sensitive: false}, + {Name: "max-forwards", Value: "", Sensitive: false}, + {Name: "proxy-authenticate", Value: "", Sensitive: false}, + {Name: "proxy-authorization", Value: "", Sensitive: false}, + {Name: "range", Value: "", Sensitive: false}, + {Name: "referer", Value: "", Sensitive: false}, + {Name: "refresh", Value: "", Sensitive: false}, + {Name: "retry-after", Value: "", Sensitive: false}, + {Name: "server", Value: "", Sensitive: false}, + {Name: "set-cookie", Value: "", Sensitive: false}, + {Name: "strict-transport-security", Value: "", Sensitive: false}, + {Name: "transfer-encoding", Value: "", Sensitive: false}, + {Name: "user-agent", Value: "", Sensitive: false}, + {Name: "vary", Value: "", Sensitive: false}, + {Name: "via", Value: "", Sensitive: false}, + {Name: "www-authenticate", Value: "", Sensitive: false}, + }, +} diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go index a66cfbea6..8cbdf3f01 100644 --- a/vendor/golang.org/x/net/http2/hpack/tables.go +++ b/vendor/golang.org/x/net/http2/hpack/tables.go @@ -96,8 +96,7 @@ func (t *headerFieldTable) evictOldest(n int) { // meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic // table, the return value i actually refers to the entry t.ents[t.len()-i]. // -// All tables are assumed to be a dynamic tables except for the global -// staticTable pointer. +// All tables are assumed to be a dynamic tables except for the global staticTable. // // See Section 2.3.3. 
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { @@ -125,81 +124,6 @@ func (t *headerFieldTable) idToIndex(id uint64) uint64 { return k + 1 } -// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = newStaticTable() -var staticTableEntries = [...]HeaderField{ - {Name: ":authority"}, - {Name: ":method", Value: "GET"}, - {Name: ":method", Value: "POST"}, - {Name: ":path", Value: "/"}, - {Name: ":path", Value: "/index.html"}, - {Name: ":scheme", Value: "http"}, - {Name: ":scheme", Value: "https"}, - {Name: ":status", Value: "200"}, - {Name: ":status", Value: "204"}, - {Name: ":status", Value: "206"}, - {Name: ":status", Value: "304"}, - {Name: ":status", Value: "400"}, - {Name: ":status", Value: "404"}, - {Name: ":status", Value: "500"}, - {Name: "accept-charset"}, - {Name: "accept-encoding", Value: "gzip, deflate"}, - {Name: "accept-language"}, - {Name: "accept-ranges"}, - {Name: "accept"}, - {Name: "access-control-allow-origin"}, - {Name: "age"}, - {Name: "allow"}, - {Name: "authorization"}, - {Name: "cache-control"}, - {Name: "content-disposition"}, - {Name: "content-encoding"}, - {Name: "content-language"}, - {Name: "content-length"}, - {Name: "content-location"}, - {Name: "content-range"}, - {Name: "content-type"}, - {Name: "cookie"}, - {Name: "date"}, - {Name: "etag"}, - {Name: "expect"}, - {Name: "expires"}, - {Name: "from"}, - {Name: "host"}, - {Name: "if-match"}, - {Name: "if-modified-since"}, - {Name: "if-none-match"}, - {Name: "if-range"}, - {Name: "if-unmodified-since"}, - {Name: "last-modified"}, - {Name: "link"}, - {Name: "location"}, - {Name: "max-forwards"}, - {Name: "proxy-authenticate"}, - {Name: "proxy-authorization"}, - {Name: "range"}, - {Name: "referer"}, - {Name: "refresh"}, - {Name: "retry-after"}, - {Name: "server"}, - {Name: "set-cookie"}, - {Name: "strict-transport-security"}, - {Name: "transfer-encoding"}, - {Name: "user-agent"}, - {Name: "vary"}, - {Name: "via"}, - {Name: "www-authenticate"}, -} - -func newStaticTable() *headerFieldTable { - t := &headerFieldTable{} - t.init() - for _, e := range staticTableEntries[:] { - t.addEntry(e) - } - return t -} - var huffmanCodes = [256]uint32{ 0x1ff8, 0x7fffd8, diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 43cc2a34a..8cb14f3c9 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -98,6 +98,19 @@ type Server struct { // the HTTP/2 spec's recommendations. MaxConcurrentStreams uint32 + // MaxDecoderHeaderTableSize optionally specifies the http2 + // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It + // informs the remote endpoint of the maximum size of the header compression + // table used to decode header blocks, in octets. If zero, the default value + // of 4096 is used. + MaxDecoderHeaderTableSize uint32 + + // MaxEncoderHeaderTableSize optionally specifies an upper limit for the + // header compression table used for encoding request headers. Received + // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero, + // the default value of 4096 is used. + MaxEncoderHeaderTableSize uint32 + // MaxReadFrameSize optionally specifies the largest frame // this server is willing to read. A valid value is between // 16k and 16M, inclusive. 
If zero or otherwise invalid, a @@ -170,6 +183,20 @@ func (s *Server) maxConcurrentStreams() uint32 { return defaultMaxStreams } +func (s *Server) maxDecoderHeaderTableSize() uint32 { + if v := s.MaxDecoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + +func (s *Server) maxEncoderHeaderTableSize() uint32 { + if v := s.MaxEncoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + // maxQueuedControlFrames is the maximum number of control frames like // SETTINGS, PING and RST_STREAM that will be queued for writing before // the connection is closed to prevent memory exhaustion attacks. @@ -394,7 +421,6 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { advMaxStreams: s.maxConcurrentStreams(), initialStreamSendWindowSize: initialWindowSize, maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -422,14 +448,15 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // configured value for inflow, that will be updated when we send a // WINDOW_UPDATE shortly after sending SETTINGS. sc.flow.add(initialWindowSize) - sc.inflow.add(initialWindowSize) + sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) fr := NewFramer(sc.bw, c) if s.CountError != nil { fr.countError = s.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() fr.SetMaxReadFrameSize(s.maxReadFrameSize()) sc.framer = fr @@ -536,8 +563,8 @@ type serverConn struct { wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes bodyReadCh chan bodyReadMsg // from handlers -> serve serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop - flow flow // conn-wide (not stream-specific) outbound flow control - inflow flow // conn-wide inbound flow control + flow outflow // conn-wide (not stream-specific) outbound flow control + inflow inflow // conn-wide inbound flow control tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler @@ -559,9 +586,9 @@ type serverConn struct { streams map[uint32]*stream initialStreamSendWindowSize int32 maxFrameSize int32 - headerTableSize uint32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + canonHeaderKeysSize int // canonHeader keys size in bytes writingFrame bool // started writing a frame (on serve goroutine or separate) writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh needsFrameFlush bool // last frame write wasn't a flush @@ -614,15 +641,17 @@ type stream struct { cancelCtx func() // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow outflow // limits writing from Handler to client + inflow inflow // what the client is allowed to POST/etc to us state streamState resetQueued bool // RST_STREAM queued for write; set by 
sc.resetStream gotTrailerHeader bool // HEADER frame for trailers was seen wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline *time.Timer // nil if unused writeDeadline *time.Timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -738,6 +767,13 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { } } +// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size +// of the entries in the canonHeader cache. +// This should be larger than the size of unique, uncommon header keys likely to +// be sent by the peer, while not so high as to permit unreasonable memory usage +// if the peer sends an unbounded number of unique header keys. +const maxCachedCanonicalHeadersKeysSize = 2048 + func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() buildCommonHeaderMapsOnce() @@ -753,14 +789,10 @@ func (sc *serverConn) canonicalHeader(v string) string { sc.canonHeader = make(map[string]string) } cv = http.CanonicalHeaderKey(v) - // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of - // entries in the canonHeader cache. This should be larger than the number - // of unique, uncommon header keys likely to be sent by the peer, while not - // so high as to permit unreasonable memory usage if the peer sends an unbounded - // number of unique header keys. - const maxCachedCanonicalHeaders = 32 - if len(sc.canonHeader) < maxCachedCanonicalHeaders { + size := 100 + len(v)*2 // 100 bytes of map overhead + key + value + if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize { sc.canonHeader[v] = cv + sc.canonHeaderKeysSize += size } return cv } @@ -811,8 +843,13 @@ type frameWriteResult struct { // and then reports when it's done. // At most one goroutine can be running writeFrameAsync at a time per // serverConn. -func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { - err := wr.write.writeFrame(sc) +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { + var err error + if wd == nil { + err = wr.write.writeFrame(sc) + } else { + err = sc.framer.endWrite() + } sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err} } @@ -862,6 +899,7 @@ func (sc *serverConn) serve() { {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, }, }) @@ -869,7 +907,9 @@ func (sc *serverConn) serve() { // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
- sc.sendWindowUpdate(nil) + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } if err := sc.readPreface(); err != nil { sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) @@ -946,6 +986,8 @@ func (sc *serverConn) serve() { } case *startPushRequest: sc.startPush(v) + case func(*serverConn): + v(sc) default: panic(fmt.Sprintf("unexpected type %T", v)) } @@ -1214,9 +1256,16 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { sc.writingFrameAsync = false err := wr.write.writeFrame(sc) sc.wroteFrame(frameWriteResult{wr: wr, err: err}) + } else if wd, ok := wr.write.(*writeData); ok { + // Encode the frame in the serve goroutine, to ensure we don't have + // any lingering asynchronous references to data passed to Write. + // See https://go.dev/issue/58446. + sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil) + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr, wd) } else { sc.writingFrameAsync = true - go sc.writeFrameAsync(wr) + go sc.writeFrameAsync(wr, nil) } } @@ -1459,6 +1508,21 @@ func (sc *serverConn) processFrame(f Frame) error { sc.sawFirstSettings = true } + // Discard frames for streams initiated after the identified last + // stream sent in a GOAWAY, or all frames after sending an error. + // We still need to return connection-level flow control for DATA frames. + // RFC 9113 Section 6.8. + if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) { + + if f, ok := f.(*DataFrame); ok { + if !sc.inflow.take(f.Length) { + return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl)) + } + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + } + return nil + } + switch f := f.(type) { case *SettingsFrame: return sc.processSettings(f) @@ -1501,9 +1565,6 @@ func (sc *serverConn) processPing(f *PingFrame) error { // PROTOCOL_ERROR." return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol)) } - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) return nil } @@ -1565,6 +1626,9 @@ func (sc *serverConn) closeStream(st *stream, err error) { panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) } st.state = stateClosed + if st.readDeadline != nil { + st.readDeadline.Stop() + } if st.writeDeadline != nil { st.writeDeadline.Stop() } @@ -1586,10 +1650,18 @@ func (sc *serverConn) closeStream(st *stream, err error) { if p := st.body; p != nil { // Return any buffered unread bytes worth of conn-level flow control. 
// See golang.org/issue/16481 - sc.sendWindowUpdate(nil) + sc.sendWindowUpdate(nil, p.Len()) p.CloseWithError(err) } + if e, ok := err.(StreamError); ok { + if e.Cause != nil { + err = e.Cause + } else { + err = errStreamClosed + } + } + st.closeErr = err st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -1632,7 +1704,6 @@ func (sc *serverConn) processSetting(s Setting) error { } switch s.ID { case SettingHeaderTableSize: - sc.headerTableSize = s.Val sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) case SettingEnablePush: sc.pushEnabled = s.Val != 0 @@ -1686,16 +1757,6 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { func (sc *serverConn) processData(f *DataFrame) error { sc.serveG.check() id := f.Header().StreamID - if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || id > sc.maxClientStreamID) { - // Discard all DATA frames if the GOAWAY is due to an - // error, or: - // - // Section 6.8: After sending a GOAWAY frame, the sender - // can discard frames for streams initiated by the - // receiver with identifiers higher than the identified - // last stream. - return nil - } data := f.Data() state, st := sc.state(id) @@ -1726,15 +1787,10 @@ func (sc *serverConn) processData(f *DataFrame) error { // But still enforce their connection-level flow control, // and return any flow control bytes since we're not going // to consume them. - if sc.inflow.available() < int32(f.Length) { + if !sc.inflow.take(f.Length) { return sc.countError("data_flow", streamError(id, ErrCodeFlowControl)) } - // Deduct the flow control from inflow, since we're - // going to immediately add it back in - // sendWindowUpdate, which also schedules sending the - // frames. - sc.inflow.take(int32(f.Length)) - sc.sendWindowUpdate(nil) // conn-level + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level if st != nil && st.resetQueued { // Already have a stream error in flight. Don't send another. @@ -1748,11 +1804,10 @@ func (sc *serverConn) processData(f *DataFrame) error { // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { - if sc.inflow.available() < int32(f.Length) { + if !sc.inflow.take(f.Length) { return sc.countError("data_flow", streamError(id, ErrCodeFlowControl)) } - sc.inflow.take(int32(f.Length)) - sc.sendWindowUpdate(nil) // conn-level + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the @@ -1762,15 +1817,14 @@ func (sc *serverConn) processData(f *DataFrame) error { } if f.Length > 0 { // Check whether the client has flow control quota. - if st.inflow.available() < int32(f.Length) { + if !takeInflows(&sc.inflow, &st.inflow, f.Length) { return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl)) } - st.inflow.take(int32(f.Length)) if len(data) > 0 { wrote, err := st.body.Write(data) if err != nil { - sc.sendWindowUpdate32(nil, int32(f.Length)-int32(wrote)) + sc.sendWindowUpdate(nil, int(f.Length)-wrote) return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed)) } if wrote != len(data) { @@ -1781,10 +1835,12 @@ func (sc *serverConn) processData(f *DataFrame) error { // Return any padded flow control now, since we won't // refund it later on body reads. 
- if pad := int32(f.Length) - int32(len(data)); pad > 0 { - sc.sendWindowUpdate32(nil, pad) - sc.sendWindowUpdate32(st, pad) - } + // Call sendWindowUpdate even if there is no padding, + // to return buffered flow control credit if the sent + // window has shrunk. + pad := int32(f.Length) - int32(len(data)) + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) } if f.StreamEnded() { st.endStream() @@ -1838,19 +1894,27 @@ func (st *stream) copyTrailersToHandlerRequest() { } } +// onReadTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's ReadTimeout has fired. +func (st *stream) onReadTimeout() { + // Wrap the ErrDeadlineExceeded to avoid callers depending on us + // returning the bare error. + st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded)) +} + // onWriteTimeout is run on its own goroutine (from time.AfterFunc) // when the stream's WriteTimeout has fired. func (st *stream) onWriteTimeout() { - st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) + st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{ + StreamID: st.id, + Code: ErrCodeInternal, + Cause: os.ErrDeadlineExceeded, + }}) } func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { sc.serveG.check() id := f.StreamID - if sc.inGoAway { - // Ignore. - return nil - } // http://tools.ietf.org/html/rfc7540#section-5.1.1 // Streams initiated by a client MUST use odd-numbered stream // identifiers. [...] An endpoint that receives an unexpected @@ -1953,6 +2017,9 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout != 0 { sc.conn.SetReadDeadline(time.Time{}) + if st.body != nil { + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + } } go sc.runHandler(rw, req, handler) @@ -2021,9 +2088,6 @@ func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error { } func (sc *serverConn) processPriority(f *PriorityFrame) error { - if sc.inGoAway { - return nil - } if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil { return err } @@ -2048,8 +2112,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.srv.initialStreamRecvWindowSize()) if sc.hs.WriteTimeout != 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -2141,7 +2204,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r tlsState = sc.tlsState } - needsContinue := rp.header.Get("Expect") == "100-continue" + needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") if needsContinue { rp.header.Del("Expect") } @@ -2322,71 +2385,37 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { func (sc *serverConn) noteBodyRead(st *stream, n int) { sc.serveG.check() - sc.sendWindowUpdate(nil) // conn-level + sc.sendWindowUpdate(nil, n) // conn-level if st.state != stateHalfClosedRemote && st.state != stateClosed { // Don't send this WINDOW_UPDATE if the stream is closed // remotely. 
- sc.sendWindowUpdate(st) + sc.sendWindowUpdate(st, n) } } // st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream) { - sc.serveG.check() - - var n int32 - if st == nil { - if avail, windowSize := sc.inflow.available(), sc.srv.initialConnRecvWindowSize(); avail > windowSize/2 { - return - } else { - n = windowSize - avail - } - } else { - if avail, windowSize := st.inflow.available(), sc.srv.initialStreamRecvWindowSize(); avail > windowSize/2 { - return - } else { - n = windowSize - avail - } - } - // "The legal range for the increment to the flow control - // window is 1 to 2^31-1 (2,147,483,647) octets." - // A Go Read call on 64-bit machines could in theory read - // a larger Read than this. Very unlikely, but we handle it here - // rather than elsewhere for now. - const maxUint31 = 1<<31 - 1 - for n >= maxUint31 { - sc.sendWindowUpdate32(st, maxUint31) - n -= maxUint31 - } - sc.sendWindowUpdate32(st, int32(n)) +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.sendWindowUpdate(st, int(n)) } // st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { sc.serveG.check() - if n == 0 { - return - } - if n < 0 { - panic("negative update") - } var streamID uint32 - if st != nil { + var send int32 + if st == nil { + send = sc.inflow.add(n) + } else { streamID = st.id + send = st.inflow.add(n) + } + if send == 0 { + return } sc.writeFrame(FrameWriteRequest{ - write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + write: writeWindowUpdate{streamID: streamID, n: uint32(send)}, stream: st, }) - var ok bool - if st == nil { - ok = sc.inflow.add(n) - } else { - ok = st.inflow.add(n) - } - if !ok { - panic("internal error; sent too many window updates without decrements?") - } } // requestBody is the Handler's Request.Body type. @@ -2474,7 +2503,15 @@ type responseWriterState struct { type chunkWriter struct{ rws *responseWriterState } -func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } +func (cw chunkWriter) Write(p []byte) (n int, err error) { + n, err = cw.rws.writeChunk(p) + if err == errStreamClosed { + // If writing failed because the stream has been closed, + // return the reason it was closed. + err = cw.rws.stream.closeErr + } + return n, err +} func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 } @@ -2668,23 +2705,85 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { } } +func (w *responseWriter) SetReadDeadline(deadline time.Time) error { + st := w.rws.stream + if !deadline.IsZero() && deadline.Before(time.Now()) { + // If we're setting a deadline in the past, reset the stream immediately + // so writes after SetWriteDeadline returns will fail. + st.onReadTimeout() + return nil + } + w.rws.conn.sendServeMsg(func(sc *serverConn) { + if st.readDeadline != nil { + if !st.readDeadline.Stop() { + // Deadline already exceeded, or stream has been closed. 
+ return + } + } + if deadline.IsZero() { + st.readDeadline = nil + } else if st.readDeadline == nil { + st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + } else { + st.readDeadline.Reset(deadline.Sub(time.Now())) + } + }) + return nil +} + +func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { + st := w.rws.stream + if !deadline.IsZero() && deadline.Before(time.Now()) { + // If we're setting a deadline in the past, reset the stream immediately + // so writes after SetWriteDeadline returns will fail. + st.onWriteTimeout() + return nil + } + w.rws.conn.sendServeMsg(func(sc *serverConn) { + if st.writeDeadline != nil { + if !st.writeDeadline.Stop() { + // Deadline already exceeded, or stream has been closed. + return + } + } + if deadline.IsZero() { + st.writeDeadline = nil + } else if st.writeDeadline == nil { + st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + } else { + st.writeDeadline.Reset(deadline.Sub(time.Now())) + } + }) + return nil +} + func (w *responseWriter) Flush() { + w.FlushError() +} + +func (w *responseWriter) FlushError() error { rws := w.rws if rws == nil { panic("Header called after Handler finished") } + var err error if rws.bw.Buffered() > 0 { - if err := rws.bw.Flush(); err != nil { - // Ignore the error. The frame writer already knows. - return - } + err = rws.bw.Flush() } else { // The bufio.Writer won't call chunkWriter.Write // (writeChunk with zero bytes, so we have to do it // ourselves to force the HTTP response header and/or // final DATA frame (with END_STREAM) to be sent. - rws.writeChunk(nil) + _, err = chunkWriter{rws}.Write(nil) + if err == nil { + select { + case <-rws.stream.cw: + err = rws.stream.closeErr + default: + } + } } + return err } func (w *responseWriter) CloseNotify() <-chan bool { diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index c5d005bba..05ba23d3d 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -16,6 +16,7 @@ import ( "errors" "fmt" "io" + "io/fs" "log" "math" mathrand "math/rand" @@ -46,10 +47,6 @@ const ( // we buffer per stream. transportDefaultStreamFlow = 4 << 20 - // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send - // a stream-level WINDOW_UPDATE for at a time. - transportDefaultStreamMinRefresh = 4 << 10 - defaultUserAgent = "Go-http-client/2.0" // initialMaxConcurrentStreams is a connections maxConcurrentStreams until @@ -117,6 +114,28 @@ type Transport struct { // to mean no limit. MaxHeaderListSize uint32 + // MaxReadFrameSize is the http2 SETTINGS_MAX_FRAME_SIZE to send in the + // initial settings frame. It is the size in bytes of the largest frame + // payload that the sender is willing to receive. If 0, no setting is + // sent, and the value is provided by the peer, which should be 16384 + // according to the spec: + // https://datatracker.ietf.org/doc/html/rfc7540#section-6.5.2. + // Values are bounded in the range 16k to 16M. + MaxReadFrameSize uint32 + + // MaxDecoderHeaderTableSize optionally specifies the http2 + // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It + // informs the remote endpoint of the maximum size of the header compression + // table used to decode header blocks, in octets. If zero, the default value + // of 4096 is used. 
+ MaxDecoderHeaderTableSize uint32 + + // MaxEncoderHeaderTableSize optionally specifies an upper limit for the + // header compression table used for encoding request headers. Received + // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero, + // the default value of 4096 is used. + MaxEncoderHeaderTableSize uint32 + // StrictMaxConcurrentStreams controls whether the server's // SETTINGS_MAX_CONCURRENT_STREAMS should be respected // globally. If false, new TCP connections are created to the @@ -170,6 +189,19 @@ func (t *Transport) maxHeaderListSize() uint32 { return t.MaxHeaderListSize } +func (t *Transport) maxFrameReadSize() uint32 { + if t.MaxReadFrameSize == 0 { + return 0 // use the default provided by the peer + } + if t.MaxReadFrameSize < minMaxFrameSize { + return minMaxFrameSize + } + if t.MaxReadFrameSize > maxFrameSize { + return maxFrameSize + } + return t.MaxReadFrameSize +} + func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } @@ -274,8 +306,8 @@ type ClientConn struct { mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control doNotReuse bool // whether conn is marked to not be reused for any future requests closing bool closed bool @@ -292,10 +324,11 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -339,10 +372,10 @@ type clientStream struct { respHeaderRecv chan struct{} // closed when headers are received res *http.Response // set if respHeaderRecv is closed - flow flow // guarded by cc.mu - inflow flow // guarded by cc.mu - bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read - readErr error // sticky read error; owned by transportResponseBody.Read + flow outflow // guarded by cc.mu + inflow inflow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read reqBody io.ReadCloser reqBodyContentLength int64 // -1 means unknown @@ -501,6 +534,15 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } +var retryBackoffHook func(time.Duration) *time.Timer + +func backoffNewTimer(d time.Duration) *time.Timer { + if retryBackoffHook != nil { + return retryBackoffHook(d) + } + return time.NewTimer(d) +} + // RoundTripOpt is like RoundTrip, but takes options. 
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -526,11 +568,14 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) + d := time.Second * time.Duration(backoff) + timer := backoffNewTimer(d) select { - case <-time.After(time.Second * time.Duration(backoff)): + case <-timer.C: t.vlogf("RoundTrip retrying after failure: %v", err) continue case <-req.Context().Done(): + timer.Stop() err = req.Context().Err() } } @@ -668,6 +713,20 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } +func (t *Transport) maxDecoderHeaderTableSize() uint32 { + if v := t.MaxDecoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + +func (t *Transport) maxEncoderHeaderTableSize() uint32 { + if v := t.MaxEncoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } @@ -708,15 +767,19 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) + if t.maxFrameReadSize() != 0 { + cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) + } if t.CountError != nil { cc.fr.countError = t.CountError } - cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + maxHeaderTableSize := t.maxDecoderHeaderTableSize() + cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() - // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on - // henc in response to SETTINGS frames? cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.peerMaxHeaderTableSize = initialHeaderTableSize if t.AllowHTTP { cc.nextStreamID = 3 @@ -731,14 +794,20 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro {ID: SettingEnablePush, Val: 0}, {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, } + if max := t.maxFrameReadSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + } if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } + if maxHeaderTableSize != initialHeaderTableSize { + initialSettings = append(initialSettings, Setting{ID: SettingHeaderTableSize, Val: maxHeaderTableSize}) + } cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) 
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.inflow.init(transportDefaultConnFlow + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -1075,7 +1144,7 @@ var errRequestCanceled = errors.New("net/http: request canceled") func commaSeparatedTrailers(req *http.Request) (string, error) { keys := make([]string, 0, len(req.Trailer)) for k := range req.Trailer { - k = http.CanonicalHeaderKey(k) + k = canonicalHeader(k) switch k { case "Transfer-Encoding", "Trailer", "Content-Length": return "", fmt.Errorf("invalid Trailer key %q", k) @@ -1500,7 +1569,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { close(cs.donec) } -// awaitOpenSlotForStream waits until len(streams) < maxConcurrentStreams. +// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams. // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { @@ -1612,7 +1681,7 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { var sawEOF bool for !sawEOF { - n, err := body.Read(buf[:len(buf)]) + n, err := body.Read(buf) if hasContentLen { remainLen -= int64(n) if remainLen == 0 && err == nil { @@ -1915,7 +1984,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // Header list size is ok. Write the headers. enumerateHeaders(func(name, value string) { - name, ascii := asciiToLower(name) + name, ascii := lowerHeader(name) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -1968,7 +2037,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { } for k, vv := range trailer { - lowKey, ascii := asciiToLower(k) + lowKey, ascii := lowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -2000,8 +2069,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.add(transportDefaultStreamFlow) - cs.inflow.setConnFlow(&cc.inflow) + cs.inflow.init(transportDefaultStreamFlow) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2301,7 +2369,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra Status: status + " " + http.StatusText(statusCode), } for _, hf := range regularFields { - key := http.CanonicalHeaderKey(hf.Name) + key := canonicalHeader(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { @@ -2309,7 +2377,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil + t[canonicalHeader(v)] = nil }) } else { vv := header[key] @@ -2414,7 +2482,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr trailer := make(http.Header) for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) + key := canonicalHeader(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer @@ -2460,21 +2528,10 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) { } cc.mu.Lock() - var connAdd, streamAdd int32 - // Check the conn-level first, before the stream-level. 
- if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { - connAdd = transportDefaultConnFlow - v - cc.inflow.add(connAdd) - } + connAdd := cc.inflow.add(n) + var streamAdd int32 if err == nil { // No need to refresh if the stream is over or failed. - // Consider any buffered body data (read from the conn but not - // consumed by the client) when computing flow control for this - // stream. - v := int(cs.inflow.available()) + cs.bufPipe.Len() - if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = int32(transportDefaultStreamFlow - v) - cs.inflow.add(streamAdd) - } + streamAdd = cs.inflow.add(n) } cc.mu.Unlock() @@ -2502,17 +2559,15 @@ func (b transportResponseBody) Close() error { if unread > 0 { cc.mu.Lock() // Return connection-level flow control. - if unread > 0 { - cc.inflow.add(int32(unread)) - } + connAdd := cc.inflow.add(unread) cc.mu.Unlock() // TODO(dneil): Acquiring this mutex can block indefinitely. // Move flow control return to a goroutine? cc.wmu.Lock() // Return connection-level flow control. - if unread > 0 { - cc.fr.WriteWindowUpdate(0, uint32(unread)) + if connAdd > 0 { + cc.fr.WriteWindowUpdate(0, uint32(connAdd)) } cc.bw.Flush() cc.wmu.Unlock() @@ -2555,13 +2610,18 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { // But at least return their flow control: if f.Length > 0 { cc.mu.Lock() - cc.inflow.add(int32(f.Length)) + ok := cc.inflow.take(f.Length) + connAdd := cc.inflow.add(int(f.Length)) cc.mu.Unlock() - - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(f.Length)) - cc.bw.Flush() - cc.wmu.Unlock() + if !ok { + return ConnectionError(ErrCodeFlowControl) + } + if connAdd > 0 { + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(connAdd)) + cc.bw.Flush() + cc.wmu.Unlock() + } } return nil } @@ -2592,9 +2652,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } // Check connection-level flow control. cc.mu.Lock() - if cs.inflow.available() >= int32(f.Length) { - cs.inflow.take(int32(f.Length)) - } else { + if !takeInflows(&cc.inflow, &cs.inflow, f.Length) { cc.mu.Unlock() return ConnectionError(ErrCodeFlowControl) } @@ -2616,19 +2674,20 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } } - if refund > 0 { - cc.inflow.add(int32(refund)) - if !didReset { - cs.inflow.add(int32(refund)) - } + sendConn := cc.inflow.add(refund) + var sendStream int32 + if !didReset { + sendStream = cs.inflow.add(refund) } cc.mu.Unlock() - if refund > 0 { + if sendConn > 0 || sendStream > 0 { cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(refund)) - if !didReset { - cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + if sendConn > 0 { + cc.fr.WriteWindowUpdate(0, uint32(sendConn)) + } + if sendStream > 0 { + cc.fr.WriteWindowUpdate(cs.ID, uint32(sendStream)) } cc.bw.Flush() cc.wmu.Unlock() @@ -2760,8 +2819,10 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc.cond.Broadcast() cc.initialWindowSize = s.Val + case SettingHeaderTableSize: + cc.henc.SetMaxDynamicTableSize(s.Val) + cc.peerMaxHeaderTableSize = s.Val default: - // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
cc.vlogf("Unhandled Setting: %v", s) } return nil @@ -2985,7 +3046,11 @@ func (gz *gzipReader) Read(p []byte) (n int, err error) { } func (gz *gzipReader) Close() error { - return gz.body.Close() + if err := gz.body.Close(); err != nil { + return err + } + gz.zerr = fs.ErrClosed + return nil } type errorReader struct{ err error } diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go new file mode 100644 index 000000000..cebde7634 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go @@ -0,0 +1,30 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go new file mode 100644 index 000000000..cebde7634 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go @@ -0,0 +1,30 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 +) diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go index 1f422e71d..846f0e1f9 100644 --- a/vendor/golang.org/x/net/ipv6/dgramopt.go +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -245,7 +245,7 @@ func (c *dgramOpt) Checksum() (on bool, offset int, err error) { return true, offset, nil } -// SetChecksum enables the kernel checksum processing. If on is ture, +// SetChecksum enables the kernel checksum processing. If on is true, // the offset should be an offset in bytes into the data of where the // checksum field is located. func (c *dgramOpt) SetChecksum(on bool, offset int) error { diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go index 9bf4286c7..d6c71101e 100644 --- a/vendor/golang.org/x/net/trace/histogram.go +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -32,7 +32,7 @@ type histogram struct { valueCount int64 // number of values recorded for single value } -// AddMeasurement records a value measurement observation to the histogram. +// addMeasurement records a value measurement observation to the histogram. func (h *histogram) addMeasurement(value int64) { // TODO: assert invariant h.sum += value diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c index a4605e6d1..6cc73109f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build (386 || amd64 || amd64p32) && gccgo // +build 386 amd64 amd64p32 // +build gccgo diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 79a38a0b9..a968b80fa 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -4,6 +4,11 @@ package cpu +import ( + "strings" + "syscall" +) + // HWCAP/HWCAP2 bits. These are exposed by Linux. const ( hwcap_FP = 1 << 0 @@ -32,10 +37,45 @@ const ( hwcap_ASIMDFHM = 1 << 23 ) +// linuxKernelCanEmulateCPUID reports whether we're running +// on Linux 4.11+. Ideally we'd like to ask the question about +// whether the current kernel contains +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 +// but the version number will have to do. +func linuxKernelCanEmulateCPUID() bool { + var un syscall.Utsname + syscall.Uname(&un) + var sb strings.Builder + for _, b := range un.Release[:] { + if b == 0 { + break + } + sb.WriteByte(byte(b)) + } + major, minor, _, ok := parseRelease(sb.String()) + return ok && (major > 4 || major == 4 && minor >= 11) +} + func doinit() { if err := readHWCAP(); err != nil { - // failed to read /proc/self/auxv, try reading registers directly - readARM64Registers() + // We failed to read /proc/self/auxv. This can happen if the binary has + // been given extra capabilities(7) with /bin/setcap. + // + // When this happens, we have two options. If the Linux kernel is new + // enough (4.11+), we can read the arm64 registers directly which'll + // trap into the kernel and then return back to userspace. + // + // But on older kernels, such as Linux 4.4.180 as used on many Synology + // devices, calling readARM64Registers (specifically getisar0) will + // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo + // instead. + // + // See golang/go#57336. + if linuxKernelCanEmulateCPUID() { + readARM64Registers() + } else { + readLinuxProcCPUInfo() + } return } diff --git a/vendor/golang.org/x/sys/cpu/endian_big.go b/vendor/golang.org/x/sys/cpu/endian_big.go new file mode 100644 index 000000000..93ce03a34 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/endian_big.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 +// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 + +package cpu + +// IsBigEndian records whether the GOARCH's byte order is big endian. +const IsBigEndian = true diff --git a/vendor/golang.org/x/sys/cpu/endian_little.go b/vendor/golang.org/x/sys/cpu/endian_little.go new file mode 100644 index 000000000..fe545966b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/endian_little.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh +// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh + +package cpu + +// IsBigEndian records whether the GOARCH's byte order is big endian. 
+const IsBigEndian = false diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go new file mode 100644 index 000000000..762b63d68 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/parse.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "strconv" + +// parseRelease parses a dot-separated version number. It follows the semver +// syntax, but allows the minor and patch versions to be elided. +// +// This is a copy of the Go runtime's parseRelease from +// https://golang.org/cl/209597. +func parseRelease(rel string) (major, minor, patch int, ok bool) { + // Strip anything after a dash or plus. + for i := 0; i < len(rel); i++ { + if rel[i] == '-' || rel[i] == '+' { + rel = rel[:i] + break + } + } + + next := func() (int, bool) { + for i := 0; i < len(rel); i++ { + if rel[i] == '.' { + ver, err := strconv.Atoi(rel[:i]) + rel = rel[i+1:] + return ver, err == nil + } + } + ver, err := strconv.Atoi(rel) + rel = "" + return ver, err == nil + } + if major, ok = next(); !ok || rel == "" { + return + } + if minor, ok = next(); !ok || rel == "" { + return + } + patch, ok = next() + return +} diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go new file mode 100644 index 000000000..d87bd6b3e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -0,0 +1,54 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && arm64 +// +build linux,arm64 + +package cpu + +import ( + "errors" + "io" + "os" + "strings" +) + +func readLinuxProcCPUInfo() error { + f, err := os.Open("/proc/cpuinfo") + if err != nil { + return err + } + defer f.Close() + + var buf [1 << 10]byte // enough for first CPU + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + in := string(buf[:n]) + const features = "\nFeatures : " + i := strings.Index(in, features) + if i == -1 { + return errors.New("no CPU features found") + } + in = in[i+len(features):] + if i := strings.Index(in, "\n"); i != -1 { + in = in[:i] + } + m := map[string]*bool{} + + initOptions() // need it early here; it's harmless to call twice + for _, o := range options { + m[o.Name] = o.Feature + } + // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". 
+ m["evtstrm"] = &ARM64.HasEVTSTRM + + for _, f := range strings.Fields(in) { + if p, ok := m[f]; ok { + *p = true + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go index 1e7a9ada0..46c5b525e 100644 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -7,9 +7,11 @@ package execabs -import "strings" +import ( + "errors" + "os/exec" +) func isGo119ErrDot(err error) bool { - // TODO: return errors.Is(err, exec.ErrDot) - return strings.Contains(err.Error(), "current directory") + return errors.Is(err, exec.ErrDot) } diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index 0dee23222..b06f52d74 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build gccgo && !aix -// +build gccgo,!aix +//go:build gccgo && !aix && !hurd +// +build gccgo,!aix,!hurd package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index 2cb1fefac..f98a1c542 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build gccgo -// +build !aix +//go:build gccgo && !aix && !hurd +// +build gccgo,!aix,!hurd #include #include diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index 6c7ad052e..1c51b0ec2 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris +// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 727cba212..8e3947c36 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -174,10 +174,10 @@ openbsd_arm64) mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; openbsd_mips64) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd" + mksyscall="go run mksyscall.go -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. 
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 1f6338218..192b071b3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -230,6 +230,7 @@ func direntNamlen(buf []byte) (uint64, bool) { func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } +func PtraceDenyAttach() (err error) { return ptrace(PT_DENY_ATTACH, 0, 0, 0) } //sysnb pipe(p *[2]int32) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 61c0d0de1..a41111a79 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -255,6 +255,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index de7c23e06..d50b9dc25 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -319,6 +319,7 @@ func PtraceSingleStep(pid int) (err error) { //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index b11ede89a..6a91d471d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -60,8 +60,13 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) { return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) } -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)} +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
+ Len: uint32(countin), + } err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 9ed8eec6c..48110a0ab 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -60,8 +60,13 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) { return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) } -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. + Len: uint64(countin), + } err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index f8ac98247..52f1d4b75 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -56,8 +56,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)} +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. + Len: uint32(countin), + } err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index 8e932036e..5537ee4f2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -56,8 +56,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
+ Len: uint64(countin), + } err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index cbe122278..164abd5d2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -56,8 +56,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. + Len: uint64(countin), + } err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go new file mode 100644 index 000000000..4ffb64808 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -0,0 +1,22 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build hurd +// +build hurd + +package unix + +/* +#include +int ioctl(int, unsigned long int, uintptr_t); +*/ +import "C" + +func ioctl(fd int, req uint, arg uintptr) (err error) { + r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) + if r0 == -1 && er != nil { + err = er + } + return +} diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go new file mode 100644 index 000000000..7cf54a3e4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go @@ -0,0 +1,29 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build 386 && hurd +// +build 386,hurd + +package unix + +const ( + TIOCGETA = 0x62251713 +) + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index c5a98440e..5443dddd4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1800,6 +1800,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Capset(hdr *CapUserHeader, data *CapUserData) (err error) //sys Chdir(path string) (err error) //sys Chroot(path string) (err error) +//sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) @@ -1973,36 +1974,46 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PREADV2 //sys pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PWRITEV2 -func bytes2iovec(bs [][]byte) []Iovec { - iovecs := make([]Iovec, len(bs)) - for i, b := range bs { - iovecs[i].SetLen(len(b)) +// minIovec is the size of the small initial allocation used by +// Readv, Writev, etc. +// +// This small allocation gets stack allocated, which lets the +// common use case of len(iovs) <= minIovs avoid more expensive +// heap allocations. +const minIovec = 8 + +// appendBytes converts bs to Iovecs and appends them to vecs. +func appendBytes(vecs []Iovec, bs [][]byte) []Iovec { + for _, b := range bs { + var v Iovec + v.SetLen(len(b)) if len(b) > 0 { - iovecs[i].Base = &b[0] + v.Base = &b[0] } else { - iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) + v.Base = (*byte)(unsafe.Pointer(&_zero)) } + vecs = append(vecs, v) } - return iovecs + return vecs } -// offs2lohi splits offs into its lower and upper unsigned long. On 64-bit -// systems, hi will always be 0. On 32-bit systems, offs will be split in half. -// preadv/pwritev chose this calling convention so they don't need to add a -// padding-register for alignment on ARM. +// offs2lohi splits offs into its low and high order bits. 
func offs2lohi(offs int64) (lo, hi uintptr) { - return uintptr(offs), uintptr(uint64(offs) >> SizeofLong) + const longBits = SizeofLong * 8 + return uintptr(offs), uintptr(uint64(offs) >> (longBits - 1) >> 1) // two shifts to avoid false positive in vet } func Readv(fd int, iovs [][]byte) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) readvRacedetect(iovecs, n, err) return n, err } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) lo, hi := offs2lohi(offset) n, err = preadv(fd, iovecs, lo, hi) readvRacedetect(iovecs, n, err) @@ -2010,7 +2021,8 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Preadv2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) lo, hi := offs2lohi(offset) n, err = preadv2(fd, iovecs, lo, hi, flags) readvRacedetect(iovecs, n, err) @@ -2037,7 +2049,8 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } @@ -2047,7 +2060,8 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } @@ -2058,7 +2072,8 @@ func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Pwritev2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 666f0a1b3..35a3ad758 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -110,6 +110,20 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } +func SysctlUvmexp(name string) (*Uvmexp, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + n := uintptr(SizeofUvmexp) + var u Uvmexp + if err := sysctl(mib, (*byte)(unsafe.Pointer(&u)), &n, nil, 0); err != nil { + return nil, err + } + return &u, nil +} + func Pipe(p []int) (err error) { return Pipe2(p, 0) } @@ -245,6 +259,7 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 78daceb33..9b67b908e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -220,6 +220,7 @@ func Uname(uname 
*Utsname) error { //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go index e23c5394e..04aa43f41 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build openbsd && !mips64 -// +build openbsd,!mips64 +//go:build openbsd +// +build openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 2109e569c..07ac56109 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -590,6 +590,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Creat(path string, mode uint32) (fd int, err error) //sys Dup(fd int) (nfd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 00bafda86..00f0aa375 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -331,6 +331,19 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { return } +// Recvmsg receives a message from a socket using the recvmsg system call. The +// received non-control data will be written to p, and any "out of band" +// control data will be written to oob. The flags are passed to recvmsg. +// +// The results are: +// - n is the number of non-control data bytes read into p +// - oobn is the number of control data bytes read into oob; this may be interpreted using [ParseSocketControlMessage] +// - recvflags is flags returned by recvmsg +// - from is the address of the sender +// +// If the underlying socket type is not SOCK_DGRAM, a received message +// containing oob data and a single '\0' of non-control data is treated as if +// the message contained only control data, i.e. n will be zero on return. func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { var iov [1]Iovec if len(p) > 0 { @@ -346,13 +359,9 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from return } -// RecvmsgBuffers receives a message from a socket using the recvmsg -// system call. The flags are passed to recvmsg. Any non-control data -// read is scattered into the buffers slices. The results are: -// - n is the number of non-control data read into bufs -// - oobn is the number of control data read into oob; this may be interpreted using [ParseSocketControlMessage] -// - recvflags is flags returned by recvmsg -// - from is the address of the sender +// RecvmsgBuffers receives a message from a socket using the recvmsg system +// call. This function is equivalent to Recvmsg, but non-control data read is +// scattered into the buffers slices. 
func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { iov := make([]Iovec, len(buffers)) for i := range buffers { @@ -371,11 +380,38 @@ func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn in return } +// Sendmsg sends a message on a socket to an address using the sendmsg system +// call. This function is equivalent to SendmsgN, but does not return the +// number of bytes actually sent. func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { _, err = SendmsgN(fd, p, oob, to, flags) return } +// SendmsgN sends a message on a socket to an address using the sendmsg system +// call. p contains the non-control data to send, and oob contains the "out of +// band" control data. The flags are passed to sendmsg. The number of +// non-control bytes actually written to the socket is returned. +// +// Some socket types do not support sending control data without accompanying +// non-control data. If p is empty, and oob contains control data, and the +// underlying socket type is not SOCK_DGRAM, p will be treated as containing a +// single '\0' and the return value will indicate zero bytes sent. +// +// The Go function Recvmsg, if called with an empty p and a non-empty oob, +// will read and ignore this additional '\0'. If the message is received by +// code that does not use Recvmsg, or that does not use Go at all, that code +// will need to be written to expect and ignore the additional '\0'. +// +// If you need to send non-empty oob with p actually empty, and if the +// underlying socket type supports it, you can do so via a raw system call as +// follows: +// +// msg := &unix.Msghdr{ +// Control: &oob[0], +// } +// msg.SetControllen(len(oob)) +// n, _, errno := unix.Syscall(unix.SYS_SENDMSG, uintptr(fd), uintptr(unsafe.Pointer(msg)), flags) func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { var iov [1]Iovec if len(p) > 0 { @@ -394,9 +430,8 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) } // SendmsgBuffers sends a message on a socket to an address using the sendmsg -// system call. The flags are passed to sendmsg. Any non-control data written -// is gathered from buffers. The function returns the number of bytes written -// to the socket. +// system call. This function is equivalent to SendmsgN, but the non-control +// data is gathered from buffers. func SendmsgBuffers(fd int, buffers [][]byte, oob []byte, to Sockaddr, flags int) (n int, err error) { iov := make([]Iovec, len(buffers)) for i := range buffers { @@ -543,7 +578,7 @@ func Lutimes(path string, tv []Timeval) error { return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW) } -// emptyIovec reports whether there are no bytes in the slice of Iovec. +// emptyIovecs reports whether there are no bytes in the slice of Iovec. func emptyIovecs(iov []Iovec) bool { for i := range iov { if iov[i].Len > 0 { diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 3d8930405..616b1b284 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -9,7 +9,7 @@ package unix import "time" -// TimespecToNSec returns the time stored in ts as nanoseconds. +// TimespecToNsec returns the time stored in ts as nanoseconds. func TimespecToNsec(ts Timespec) int64 { return ts.Nano() } // NsecToTimespec converts a number of nanoseconds into a Timespec. 
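For reference, a minimal sketch of how the SendmsgN/Recvmsg pair documented above is typically used to pass a file descriptor over a Unix socketpair. This is illustrative only and not part of the vendored change: the single placeholder byte accompanies the SCM_RIGHTS control message, matching the SOCK_STREAM caveat described in the new SendmsgN comment, and all names here (passFD, receiveFD) are hypothetical.

    package main

    import (
    	"fmt"
    	"os"

    	"golang.org/x/sys/unix"
    )

    // passFD sends fd over the connected Unix socket s using SendmsgN. A single
    // placeholder byte is sent alongside the SCM_RIGHTS control message, since
    // SOCK_STREAM sockets do not support control data without non-control data.
    func passFD(s int, fd int) error {
    	oob := unix.UnixRights(fd)
    	_, err := unix.SendmsgN(s, []byte{0}, oob, nil, 0)
    	return err
    }

    // receiveFD reads one SCM_RIGHTS control message from s and returns the
    // transferred file descriptor.
    func receiveFD(s int) (int, error) {
    	buf := make([]byte, 1)
    	oob := make([]byte, unix.CmsgSpace(4)) // room for one int32 descriptor
    	_, oobn, _, _, err := unix.Recvmsg(s, buf, oob, 0)
    	if err != nil {
    		return -1, err
    	}
    	msgs, err := unix.ParseSocketControlMessage(oob[:oobn])
    	if err != nil {
    		return -1, err
    	}
    	fds, err := unix.ParseUnixRights(&msgs[0])
    	if err != nil {
    		return -1, err
    	}
    	return fds[0], nil
    }

    func main() {
    	// Connected pair of stream sockets in the same process, for illustration.
    	sp, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
    	if err != nil {
    		panic(err)
    	}
    	defer unix.Close(sp[0])
    	defer unix.Close(sp[1])

    	f, err := os.Open("/dev/null")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	if err := passFD(sp[0], int(f.Fd())); err != nil {
    		panic(err)
    	}
    	fd, err := receiveFD(sp[1])
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("received fd:", fd)
    	unix.Close(fd)
    }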
diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index 663b3779d..f5f8e9f36 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -36,9 +36,14 @@ func xattrnamespace(fullattr string) (ns int, attr string, err error) { func initxattrdest(dest []byte, idx int) (d unsafe.Pointer) { if len(dest) > idx { return unsafe.Pointer(&dest[idx]) - } else { - return unsafe.Pointer(_zero) } + if dest != nil { + // extattr_get_file and extattr_list_file treat NULL differently from + // a non-NULL pointer of length zero. Preserve the property of nilness, + // even if we can't use dest directly. + return unsafe.Pointer(&_zero) + } + return nil } // FreeBSD and NetBSD implement their own syscalls to handle extended attributes diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 785d693eb..e174685ad 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -457,7 +457,6 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -563,6 +562,7 @@ const ( BUS_USB = 0x3 BUS_VIRTUAL = 0x6 CAN_BCM = 0x2 + CAN_BUS_OFF_THRESHOLD = 0x100 CAN_CTRLMODE_3_SAMPLES = 0x4 CAN_CTRLMODE_BERR_REPORTING = 0x10 CAN_CTRLMODE_CC_LEN8_DLC = 0x100 @@ -577,9 +577,12 @@ const ( CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d CAN_EFF_MASK = 0x1fffffff + CAN_ERROR_PASSIVE_THRESHOLD = 0x80 + CAN_ERROR_WARNING_THRESHOLD = 0x60 CAN_ERR_ACK = 0x20 CAN_ERR_BUSERROR = 0x80 CAN_ERR_BUSOFF = 0x40 + CAN_ERR_CNT = 0x200 CAN_ERR_CRTL = 0x4 CAN_ERR_CRTL_ACTIVE = 0x40 CAN_ERR_CRTL_RX_OVERFLOW = 0x1 @@ -820,9 +823,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2022-02-22)" + DM_VERSION_EXTRA = "-ioctl (2022-07-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2e + DM_VERSION_MINOR = 0x2f DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -1049,6 +1052,7 @@ const ( ETH_P_CAIF = 0xf7 ETH_P_CAN = 0xc ETH_P_CANFD = 0xd + ETH_P_CANXL = 0xe ETH_P_CFM = 0x8902 ETH_P_CONTROL = 0x16 ETH_P_CUST = 0x6006 @@ -1060,6 +1064,7 @@ const ( ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b ETH_P_DSA_8021Q = 0xdadb + ETH_P_DSA_A5PSW = 0xe001 ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1194,8 +1199,10 @@ const ( FAN_MARK_EVICTABLE = 0x200 FAN_MARK_FILESYSTEM = 0x100 FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORE = 0x400 FAN_MARK_IGNORED_MASK = 0x20 FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 @@ -1253,6 +1260,7 @@ const ( FSCRYPT_MODE_AES_128_CBC = 0x5 FSCRYPT_MODE_AES_128_CTS = 0x6 FSCRYPT_MODE_AES_256_CTS = 0x4 + FSCRYPT_MODE_AES_256_HCTR2 = 0xa FSCRYPT_MODE_AES_256_XTS = 0x1 FSCRYPT_POLICY_FLAGS_PAD_16 = 0x2 FSCRYPT_POLICY_FLAGS_PAD_32 = 0x3 @@ -1430,6 +1438,7 @@ const ( IFF_NOARP = 0x80 IFF_NOFILTER = 0x1000 IFF_NOTRAILERS = 0x20 + IFF_NO_CARRIER = 0x40 IFF_NO_PI = 0x1000 IFF_ONE_QUEUE = 0x2000 IFF_PERSIST = 0x800 @@ -1805,6 +1814,7 @@ const ( MADV_DONTDUMP = 0x10 MADV_DONTFORK = 0xa MADV_DONTNEED = 0x4 + MADV_DONTNEED_LOCKED = 0x18 MADV_FREE = 0x8 MADV_HUGEPAGE = 0xe MADV_HWPOISON = 0x64 @@ -1846,7 +1856,7 @@ const ( MFD_ALLOW_SEALING = 0x2 MFD_CLOEXEC = 0x1 MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16GB = 0x88000000 MFD_HUGE_16MB = 0x60000000 MFD_HUGE_1GB = 0x78000000 
MFD_HUGE_1MB = 0x50000000 @@ -2212,6 +2222,11 @@ const ( PERF_AUX_FLAG_PARTIAL = 0x4 PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 + PERF_BR_ARM64_DEBUG_DATA = 0x7 + PERF_BR_ARM64_DEBUG_EXIT = 0x5 + PERF_BR_ARM64_DEBUG_HALT = 0x4 + PERF_BR_ARM64_DEBUG_INST = 0x6 + PERF_BR_ARM64_FIQ = 0x3 PERF_FLAG_FD_CLOEXEC = 0x8 PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 @@ -2232,6 +2247,8 @@ const ( PERF_MEM_LOCK_NA = 0x1 PERF_MEM_LOCK_SHIFT = 0x18 PERF_MEM_LVLNUM_ANY_CACHE = 0xb + PERF_MEM_LVLNUM_CXL = 0x9 + PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 PERF_MEM_LVLNUM_L3 = 0x3 @@ -2265,6 +2282,7 @@ const ( PERF_MEM_REMOTE_REMOTE = 0x1 PERF_MEM_REMOTE_SHIFT = 0x25 PERF_MEM_SNOOPX_FWD = 0x1 + PERF_MEM_SNOOPX_PEER = 0x2 PERF_MEM_SNOOPX_SHIFT = 0x26 PERF_MEM_SNOOP_HIT = 0x4 PERF_MEM_SNOOP_HITM = 0x10 @@ -2301,7 +2319,6 @@ const ( PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 PIPEFS_MAGIC = 0x50495045 - PPC_CMM_MAGIC = 0xc7571590 PPPIOCGNPMODE = 0xc008744c PPPIOCNEWUNIT = 0xc004743e PRIO_PGRP = 0x1 @@ -2999,6 +3016,7 @@ const ( STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 STATX_CTIME = 0x80 + STATX_DIOALIGN = 0x2000 STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 @@ -3392,9 +3410,7 @@ const ( XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 - Z3FOLD_MAGIC = 0x33 ZONEFS_MAGIC = 0x5a4f4653 - ZSMALLOC_MAGIC = 0x58295829 _HIDIOCGRAWNAME_LEN = 0x80 _HIDIOCGRAWPHYS_LEN = 0x40 _HIDIOCGRAWUNIQ_LEN = 0x40 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 36c0dfc7c..a46df0f1e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -133,6 +133,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc03c4d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 4ff942703..6cd4a3ea9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -133,6 +133,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3eaa0fb78..c7ebee24d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d7995bdc3..9d5352c3e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -134,6 +134,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 928e24c20..f26a164f4 
100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -132,6 +132,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 179bffb47..890bc3c9b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 1fba17bd7..549f26ac6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index b77dde315..e0365e32c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 78c6c751b..fdccce15c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 1c0d31f0b..b2205c83f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 959dd9bb8..81aa5ad0f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 5a873cdbc..76807a1fd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + 
MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e336d141e..d4a5ab9e4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 390c01d92..66e65db95 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 98a6e5f11..f61925269 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -136,6 +136,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 6d56edc05..af20e474b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -46,6 +46,7 @@ const ( AF_SNA = 0xb AF_UNIX = 0x1 AF_UNSPEC = 0x0 + ALTWERASE = 0x200 ARPHRD_ETHER = 0x1 ARPHRD_FRELAY = 0xf ARPHRD_IEEE1394 = 0x18 @@ -108,6 +109,15 @@ const ( BPF_DIRECTION_IN = 0x1 BPF_DIRECTION_OUT = 0x2 BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -136,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -147,6 +158,12 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 CPUSTATES = 0x6 CP_IDLE = 0x5 CP_INTR = 0x4 @@ -170,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc100445d + DIOCADDRULE = 0xccc84404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xccc8441a + DIOCCLRIFFLAG = 0xc024445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0d04412 + DIOCCLRSTATUS = 0xc0244416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1084460 + DIOCGETQUEUE = 0xc100445f + DIOCGETQUEUES = 0xc100445e + DIOCGETRULE = 0xccc84407 + DIOCGETRULES = 0xccc84406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0084454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0084419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0244457 + DIOCKILLSRCNODES = 0xc068445b + DIOCKILLSTATES = 0xc0d04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 
0xc084444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0844450 + DIOCRADDADDRS = 0xc44c4443 + DIOCRADDTABLES = 0xc44c443d + DIOCRCLRADDRS = 0xc44c4442 + DIOCRCLRASTATS = 0xc44c4448 + DIOCRCLRTABLES = 0xc44c443c + DIOCRCLRTSTATS = 0xc44c4441 + DIOCRDELADDRS = 0xc44c4444 + DIOCRDELTABLES = 0xc44c443e + DIOCRGETADDRS = 0xc44c4446 + DIOCRGETASTATS = 0xc44c4447 + DIOCRGETTABLES = 0xc44c443f + DIOCRGETTSTATS = 0xc44c4440 + DIOCRINADEFINE = 0xc44c444d + DIOCRSETADDRS = 0xc44c4445 + DIOCRSETTFLAGS = 0xc44c444a + DIOCRTSTADDRS = 0xc44c4449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0244459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0244414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc00c4451 + DIOCXCOMMIT = 0xc00c4452 + DIOCXROLLBACK = 0xc00c4453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -186,6 +261,7 @@ const ( DLT_LOOP = 0xc DLT_MPLS = 0xdb DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PFLOG = 0x75 DLT_PFSYNC = 0x12 DLT_PPP = 0x9 @@ -196,6 +272,23 @@ const ( DLT_RAW = 0xe DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -215,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -267,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -298,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -326,15 +423,17 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 ETHERTYPE_PPP = 0x880b @@ -409,28 +508,40 @@ const ( ETHER_CRC_POLY_LE = 0xedb88320 ETHER_HDR_LEN = 0xe ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b ETHER_MAX_LEN = 0x5ee ETHER_MIN_LEN = 0x40 ETHER_TYPE_LEN = 0x2 ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x7 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 EV_ENABLE = 0x4 EV_EOF = 0x8000 EV_ERROR = 0x4000 EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 - EV_SYSFLAGS = 0xf000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 EXTA = 
0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -443,6 +554,7 @@ const ( F_GETFL = 0x3 F_GETLK = 0x7 F_GETOWN = 0x5 + F_ISATTY = 0xb F_OK = 0x0 F_RDLCK = 0x1 F_SETFD = 0x2 @@ -460,7 +572,6 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 - IFA_ROUTE = 0x1 IFF_ALLMULTI = 0x200 IFF_BROADCAST = 0x2 IFF_CANTCHANGE = 0x8e52 @@ -471,12 +582,12 @@ const ( IFF_LOOPBACK = 0x8 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PROMISC = 0x100 IFF_RUNNING = 0x40 IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 @@ -605,6 +716,7 @@ const ( IFT_LINEGROUP = 0xd2 IFT_LOCALTALK = 0x2a IFT_LOOP = 0x18 + IFT_MBIM = 0xfa IFT_MEDIAMAILOVERIP = 0x8b IFT_MFSIGLINK = 0xa7 IFT_MIOX25 = 0x26 @@ -695,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -729,8 +842,6 @@ const ( IPPROTO_AH = 0x33 IPPROTO_CARP = 0x70 IPPROTO_DIVERT = 0x102 - IPPROTO_DIVERT_INIT = 0x2 - IPPROTO_DIVERT_RESP = 0x1 IPPROTO_DONE = 0x101 IPPROTO_DSTOPTS = 0x3c IPPROTO_EGP = 0x8 @@ -762,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -787,6 +900,7 @@ const ( IPV6_LEAVE_GROUP = 0xd IPV6_MAXHLIM = 0xff IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 IPV6_MMTU = 0x500 IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 @@ -826,12 +940,12 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 - IP_DIVERTFL = 0x1022 IP_DROP_MEMBERSHIP = 0xd IP_ESP_NETWORK_LEVEL = 0x16 IP_ESP_TRANS_LEVEL = 0x15 IP_HDRINCL = 0x2 IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 IP_IPSECFLOWINFO = 0x24 IP_IPSEC_LOCAL_AUTH = 0x1b IP_IPSEC_LOCAL_CRED = 0x19 @@ -865,10 +979,15 @@ const ( IP_RETOPTS = 0x8 IP_RF = 0x8000 IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 IP_TOS = 0x3 IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -900,10 +1019,11 @@ const ( MAP_INHERIT_COPY = 0x1 MAP_INHERIT_NONE = 0x2 MAP_INHERIT_SHARE = 0x0 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 + MAP_RENAME = 0x0 MAP_SHARED = 0x1 MAP_STACK = 0x4000 MAP_TRYFIXED = 0x0 @@ -922,6 +1042,7 @@ const ( MNT_NOATIME = 0x8000 MNT_NODEV = 0x10 MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 MNT_NOSUID = 0x8 MNT_NOWAIT = 0x2 MNT_QUOTA = 0x2000 @@ -929,13 +1050,29 @@ const ( MNT_RELOAD = 0x40000 MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE = 0x10000 MNT_VISFLAGMASK = 0x400ffff MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 MSG_DONTWAIT = 0x80 @@ -946,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 
0x2 @@ -953,12 +1091,16 @@ const ( NET_RT_DUMP = 0x1 NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 - NET_RT_MAXID = 0x6 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 NOTE_CHILD = 0x4 NOTE_DELETE = 0x1 NOTE_EOF = 0x2 @@ -968,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -977,11 +1120,13 @@ const ( NOTE_TRUNCATE = 0x80 NOTE_WRITE = 0x2 OCRNL = 0x10 + OLCUC = 0x20 ONLCR = 0x2 ONLRET = 0x80 ONOCR = 0x40 ONOEOT = 0x8 OPOST = 0x1 + OXTABS = 0x4 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x40 @@ -1015,7 +1160,6 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 - PT_MASK = 0x3ff000 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 @@ -1027,19 +1171,25 @@ const ( RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb RTAX_BRD = 0x7 + RTAX_DNS = 0xc RTAX_DST = 0x0 RTAX_GATEWAY = 0x1 RTAX_GENMASK = 0x3 RTAX_IFA = 0x5 RTAX_IFP = 0x4 RTAX_LABEL = 0xa - RTAX_MAX = 0xb + RTAX_MAX = 0xf RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe RTAX_SRC = 0x8 RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 RTA_BRD = 0x80 + RTA_DNS = 0x1000 RTA_DST = 0x1 RTA_GATEWAY = 0x2 RTA_GENMASK = 0x8 @@ -1047,49 +1197,57 @@ const ( RTA_IFP = 0x10 RTA_LABEL = 0x400 RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 RTA_SRC = 0x100 RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 RTF_CLONED = 0x10000 RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 - RTF_FMASK = 0x10f808 + RTF_FMASK = 0x110fc08 RTF_GATEWAY = 0x2 RTF_HOST = 0x4 RTF_LLINFO = 0x400 - RTF_MASK = 0x80 + RTF_LOCAL = 0x200000 RTF_MODIFIED = 0x20 RTF_MPATH = 0x40000 RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 RTF_PERMANENT_ARP = 0x2000 RTF_PROTO1 = 0x8000 RTF_PROTO2 = 0x4000 RTF_PROTO3 = 0x2000 RTF_REJECT = 0x8 - RTF_SOURCE = 0x20000 RTF_STATIC = 0x800 - RTF_TUNNEL = 0x100000 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 - RTF_XRESOLVE = 0x200 + RTM_80211INFO = 0x15 RTM_ADD = 0x1 + RTM_BFD = 0x12 RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 RTM_DELADDR = 0xd RTM_DELETE = 0x2 RTM_DESYNC = 0x10 RTM_GET = 0x4 RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe - RTM_LOCK = 0x8 + RTM_INVALIDATE = 0x11 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1099,67 +1257,74 @@ const ( RTV_RTTVAR = 0x80 RTV_SPIPE = 0x10 RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff RT_TABLEID_MAX = 0xff RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 SIOCADDMULTI = 0x80206931 SIOCAIFADDR = 0x8040691a SIOCAIFGROUP = 0x80246987 - SIOCALIFADDR = 0x8218691c SIOCATMARK = 0x40047307 - SIOCBRDGADD = 0x8054693c - SIOCBRDGADDS = 0x80546941 - SIOCBRDGARL = 0x806e694d + SIOCBRDGADD = 0x805c693c + SIOCBRDGADDL = 0x805c6949 + SIOCBRDGADDS = 0x805c6941 + SIOCBRDGARL = 0x808c694d SIOCBRDGDADDR = 0x81286947 - SIOCBRDGDEL = 0x8054693d - SIOCBRDGDELS = 0x80546942 - SIOCBRDGFLUSH = 0x80546948 - SIOCBRDGFRL = 0x806e694e + SIOCBRDGDEL = 
0x805c693d + SIOCBRDGDELS = 0x805c6942 + SIOCBRDGFLUSH = 0x805c6948 + SIOCBRDGFRL = 0x808c694e SIOCBRDGGCACHE = 0xc0146941 SIOCBRDGGFD = 0xc0146952 SIOCBRDGGHT = 0xc0146951 - SIOCBRDGGIFFLGS = 0xc054693e + SIOCBRDGGIFFLGS = 0xc05c693e SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc03c6958 SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc028694f - SIOCBRDGGSIFS = 0xc054693c SIOCBRDGGTO = 0xc0146946 - SIOCBRDGIFS = 0xc0546942 + SIOCBRDGIFS = 0xc05c6942 SIOCBRDGRTS = 0xc0186943 SIOCBRDGSADDR = 0xc1286944 SIOCBRDGSCACHE = 0x80146940 SIOCBRDGSFD = 0x80146952 SIOCBRDGSHT = 0x80146951 - SIOCBRDGSIFCOST = 0x80546955 - SIOCBRDGSIFFLGS = 0x8054693f - SIOCBRDGSIFPRIO = 0x80546954 + SIOCBRDGSIFCOST = 0x805c6955 + SIOCBRDGSIFFLGS = 0x805c693f + SIOCBRDGSIFPRIO = 0x805c6954 + SIOCBRDGSIFPROT = 0x805c694a SIOCBRDGSMA = 0x80146953 SIOCBRDGSPRI = 0x80146950 SIOCBRDGSPROTO = 0x8014695a SIOCBRDGSTO = 0x80146945 SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80246989 + SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 - SIOCDLIFADDR = 0x8218691e + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae SIOCGETPFLOW = 0xc02069fe SIOCGETPFSYNC = 0xc02069f8 SIOCGETSGCNT = 0xc0147534 SIOCGETVIFCNT = 0xc0147533 SIOCGETVLAN = 0xc0206990 - SIOCGHIWAT = 0x40047301 SIOCGIFADDR = 0xc0206921 - SIOCGIFASYNCMAP = 0xc020697c SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCONF = 0xc0086924 SIOCGIFDATA = 0xc020691b @@ -1168,40 +1333,53 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGATTR = 0xc024698b SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc024698d SIOCGIFGMEMB = 0xc024698a SIOCGIFGROUP = 0xc0246988 SIOCGIFHARDMTU = 0xc02069a5 - SIOCGIFMEDIA = 0xc0286936 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0386938 SIOCGIFMETRIC = 0xc0206917 SIOCGIFMTU = 0xc020697e SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 SIOCGIFPRIORITY = 0xc020699c - SIOCGIFPSRCADDR = 0xc0206947 SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 - SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 SIOCGIFXFLAGS = 0xc020699e - SIOCGLIFADDR = 0xc218691d SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 - SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac SIOCIFCREATE = 0x8020697a SIOCIFDESTROY = 0x80206979 SIOCIFGCLONERS = 0xc00c6978 SIOCSETKALIVE = 0x801869a3 SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad SIOCSETPFLOW = 0x802069fd SIOCSETPFSYNC = 0x802069f7 SIOCSETVLAN = 0x8020698f - SIOCSHIWAT = 0x80047300 SIOCSIFADDR = 0x8020690c - SIOCSIFASYNCMAP = 0x8020697d SIOCSIFBRDADDR = 0x80206913 SIOCSIFDESCR = 0x80206980 SIOCSIFDSTADDR = 0x8020690e @@ -1209,25 +1387,37 @@ const ( SIOCSIFGATTR = 0x8024698c SIOCSIFGENERIC = 0x80206939 SIOCSIFLLADDR = 0x8020691f - SIOCSIFMEDIA = 0xc0206935 + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 SIOCSIFMETRIC = 0x80206918 SIOCSIFMTU = 0x8020697f SIOCSIFNETMASK = 0x80206916 - 
SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 SIOCSIFPRIORITY = 0x8020699b SIOCSIFRDOMAIN = 0x8020699f SIOCSIFRTLABEL = 0x80206982 - SIOCSIFTIMESLOT = 0x80206985 SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 - SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 + SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 SOCK_RAW = 0x3 SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 @@ -1238,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1245,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1258,6 +1450,7 @@ const ( SO_TIMESTAMP = 0x800 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1287,9 +1480,24 @@ const ( S_IXOTH = 0x1 S_IXUSR = 0x40 TCIFLUSH = 0x1 + TCIOFF = 0x3 TCIOFLUSH = 0x3 + TCION = 0x4 TCOFLUSH = 0x2 - TCP_MAXBURST = 0x4 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1298,11 +1506,15 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 - TCP_NSTATES = 0xb + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d TIOCCONS = 0x80047462 TIOCDRAIN = 0x2000745e TIOCEXCL = 0x2000740d @@ -1357,17 +1569,21 @@ const ( TIOCSETAF = 0x802c7416 TIOCSETAW = 0x802c7415 TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c TIOCSFLAGS = 0x8004745c TIOCSIG = 0x8004745f TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e - TIOCSTAT = 0x80047465 - TIOCSTI = 0x80017472 + TIOCSTAT = 0x20007465 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1378,6 +1594,19 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc @@ -1390,8 +1619,8 @@ const ( WCONTINUED = 0x8 WCOREFLAG = 0x80 WNOHANG = 0x1 - WSTOPPED = 0x7f WUNTRACED = 0x2 + XCASE = 0x1000000 ) // Errors @@ -1405,6 +1634,7 @@ const ( EALREADY = syscall.Errno(0x25) EAUTH = syscall.Errno(0x50) EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) EBADRPC = syscall.Errno(0x48) EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x58) @@ 
-1431,7 +1661,7 @@ const ( EIPSEC = syscall.Errno(0x52) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x5b) + ELAST = syscall.Errno(0x5f) ELOOP = syscall.Errno(0x3e) EMEDIUMTYPE = syscall.Errno(0x56) EMFILE = syscall.Errno(0x18) @@ -1459,12 +1689,14 @@ const ( ENOTCONN = syscall.Errno(0x39) ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) ENOTSOCK = syscall.Errno(0x26) ENOTSUP = syscall.Errno(0x5b) ENOTTY = syscall.Errno(0x19) ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x2d) EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x2e) EPIPE = syscall.Errno(0x20) @@ -1472,6 +1704,7 @@ const ( EPROCUNAVAIL = syscall.Errno(0x4c) EPROGMISMATCH = syscall.Errno(0x4b) EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) EPROTONOSUPPORT = syscall.Errno(0x2b) EPROTOTYPE = syscall.Errno(0x29) ERANGE = syscall.Errno(0x22) @@ -1568,7 +1801,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {35, "EAGAIN", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1624,7 +1857,11 @@ var errorList = [...]struct { {88, "ECANCELED", "operation canceled"}, {89, "EIDRM", "identifier removed"}, {90, "ENOMSG", "no message of desired type"}, - {91, "ELAST", "not supported"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, } // Signal table @@ -1638,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1665,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 25cb60948..6015fcb2b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -109,6 +109,15 @@ const ( BPF_DIRECTION_IN = 0x1 BPF_DIRECTION_OUT = 0x2 BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -137,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -177,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + 
DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -240,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -292,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -323,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -351,15 +423,17 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 ETHERTYPE_PPP = 0x880b @@ -441,10 +515,11 @@ const ( ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x8 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 @@ -466,7 +541,7 @@ const ( EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -732,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -797,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -906,6 +984,9 @@ const ( IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL 
= 0x0 + ITIMER_VIRTUAL = 0x1 IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 @@ -970,12 +1051,26 @@ const ( MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE = 0x10000 MNT_VISFLAGMASK = 0x400ffff MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -988,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -996,7 +1092,8 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_IFNAMES = 0x6 - NET_RT_MAXID = 0x7 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 @@ -1013,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -1130,9 +1228,11 @@ const ( RTF_STATIC = 0x800 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 RTM_ADD = 0x1 RTM_BFD = 0x12 RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 RTM_DELADDR = 0xd RTM_DELETE = 0x2 RTM_DESYNC = 0x10 @@ -1140,7 +1240,6 @@ const ( RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe RTM_INVALIDATE = 0x11 - RTM_LOCK = 0x8 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 @@ -1148,7 +1247,7 @@ const ( RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1166,6 +1265,9 @@ const ( RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1182,35 +1284,37 @@ const ( SIOCBRDGDELS = 0x80606942 SIOCBRDGFLUSH = 0x80606948 SIOCBRDGFRL = 0x808c694e - SIOCBRDGGCACHE = 0xc0186941 - SIOCBRDGGFD = 0xc0186952 - SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 SIOCBRDGGIFFLGS = 0xc060693e - SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGTO = 0xc0186946 + SIOCBRDGGTO = 0xc0146946 SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80186940 - SIOCBRDGSFD = 0x80186952 - SIOCBRDGSHT = 0x80186951 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 SIOCBRDGSIFCOST = 0x80606955 SIOCBRDGSIFFLGS = 0x8060693f SIOCBRDGSIFPRIO = 0x80606954 SIOCBRDGSIFPROT = 0x8060694a - SIOCBRDGSMA = 0x80186953 - SIOCBRDGSPRI = 0x80186950 - SIOCBRDGSPROTO = 0x8018695a - SIOCBRDGSTO = 0x80186945 - SIOCBRDGSTXHC = 0x80186959 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80286989 SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 0x8020699a @@ -1229,6 +1333,7 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGATTR = 0xc028698b SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d 
SIOCGIFGMEMB = 0xc028698a SIOCGIFGROUP = 0xc0286988 SIOCGIFHARDMTU = 0xc02069a5 @@ -1243,13 +1348,21 @@ const ( SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 SIOCGIFXFLAGS = 0xc020699e SIOCGLIFPHYADDR = 0xc218694b SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 SIOCGUMBINFO = 0xc02069be SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 @@ -1287,19 +1400,20 @@ const ( SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 - SIOCSWGDPID = 0xc018695b - SIOCSWGMAXFLOW = 0xc0186960 - SIOCSWGMAXGROUP = 0xc018695d - SIOCSWSDPID = 0x8018695c - SIOCSWSPORTNO = 0xc060695f SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 SOCK_DNS = 0x1000 @@ -1314,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1321,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1370,7 +1486,18 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 - TCP_MAXBURST = 0x4 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1379,8 +1506,11 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCHKVERAUTH = 0x2000741e @@ -1445,7 +1575,6 @@ const ( TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 @@ -1467,7 +1596,8 @@ const ( VMIN = 0x10 VM_ANONMIN = 0x7 VM_LOADAVG = 0x2 - VM_MAXID = 0xc + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd VM_MAXSLP = 0xa VM_METER = 0x1 VM_NKMEMPAGES = 0x6 @@ -1745,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1772,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index aef6c0856..8d44955e4 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -46,6 +46,7 @@ const ( AF_SNA = 0xb AF_UNIX = 0x1 AF_UNSPEC = 0x0 + ALTWERASE = 0x200 ARPHRD_ETHER = 0x1 ARPHRD_FRELAY = 0xf ARPHRD_IEEE1394 = 0x18 @@ -82,7 +83,7 @@ const ( BIOCGFILDROP = 0x40044278 BIOCGHDRCMPLT = 0x40044274 BIOCGRSIG = 0x40044273 - BIOCGRTIMEOUT = 0x400c426e + BIOCGRTIMEOUT = 0x4010426e BIOCGSTATS = 0x4008426f BIOCIMMEDIATE = 0x80044270 BIOCLOCK = 0x20004276 @@ -96,7 +97,7 @@ const ( BIOCSFILDROP = 0x80044279 BIOCSHDRCMPLT = 0x80044275 BIOCSRSIG = 0x80044272 - BIOCSRTIMEOUT = 0x800c426d + BIOCSRTIMEOUT = 0x8010426d BIOCVERSION = 0x40044271 BPF_A = 0x10 BPF_ABS = 0x20 @@ -108,6 +109,15 @@ const ( BPF_DIRECTION_IN = 0x1 BPF_DIRECTION_OUT = 0x2 BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -136,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -147,6 +158,12 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 CPUSTATES = 0x6 CP_IDLE = 0x5 CP_INTR = 0x4 @@ -170,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc100445d + DIOCADDRULE = 0xcce04404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcce0441a + DIOCCLRIFFLAG = 0xc024445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0d04412 + DIOCCLRSTATUS = 0xc0244416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1084460 + DIOCGETQUEUE = 0xc100445f + DIOCGETQUEUES = 0xc100445e + DIOCGETRULE = 0xcce04407 + DIOCGETRULES = 0xcce04406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0084454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0084419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0244457 + DIOCKILLSRCNODES = 0xc068445b + DIOCKILLSTATES = 0xc0d04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc44c4443 + DIOCRADDTABLES = 0xc44c443d + DIOCRCLRADDRS = 0xc44c4442 + DIOCRCLRASTATS = 0xc44c4448 + DIOCRCLRTABLES = 0xc44c443c + DIOCRCLRTSTATS = 0xc44c4441 + DIOCRDELADDRS = 0xc44c4444 + DIOCRDELTABLES = 0xc44c443e + DIOCRGETADDRS = 0xc44c4446 + DIOCRGETASTATS = 0xc44c4447 + DIOCRGETTABLES = 0xc44c443f + DIOCRGETTSTATS = 0xc44c4440 + DIOCRINADEFINE = 0xc44c444d + DIOCRSETADDRS = 0xc44c4445 + DIOCRSETTFLAGS = 0xc44c444a + DIOCRTSTADDRS = 0xc44c4449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0244459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0244414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc00c4451 + DIOCXCOMMIT = 0xc00c4452 + DIOCXROLLBACK = 0xc00c4453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -186,6 +261,7 @@ const ( DLT_LOOP = 0xc DLT_MPLS = 0xdb DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PFLOG = 0x75 DLT_PFSYNC = 0x12 DLT_PPP = 0x9 @@ -196,6 +272,23 @@ const ( DLT_RAW = 0xe DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + 
DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -215,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -267,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -298,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -326,15 +423,17 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 ETHERTYPE_PPP = 0x880b @@ -409,28 +508,40 @@ const ( ETHER_CRC_POLY_LE = 0xedb88320 ETHER_HDR_LEN = 0xe ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b ETHER_MAX_LEN = 0x5ee ETHER_MIN_LEN = 0x40 ETHER_TYPE_LEN = 0x2 ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x7 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 EV_ENABLE = 0x4 EV_EOF = 0x8000 EV_ERROR = 0x4000 EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 - EV_SYSFLAGS = 0xf000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -443,6 +554,8 @@ const ( F_GETFL = 0x3 F_GETLK = 0x7 F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 F_RDLCK = 0x1 F_SETFD = 0x2 F_SETFL = 0x4 @@ -459,7 +572,6 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 - IFA_ROUTE = 0x1 IFF_ALLMULTI = 0x200 IFF_BROADCAST = 0x2 IFF_CANTCHANGE = 0x8e52 @@ -470,12 +582,12 @@ const ( IFF_LOOPBACK = 0x8 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PROMISC = 0x100 IFF_RUNNING = 0x40 IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 @@ -604,6 +716,7 @@ const ( IFT_LINEGROUP = 0xd2 IFT_LOCALTALK = 0x2a IFT_LOOP = 0x18 + IFT_MBIM = 0xfa IFT_MEDIAMAILOVERIP = 0x8b IFT_MFSIGLINK = 0xa7 IFT_MIOX25 = 0x26 @@ -694,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -728,8 +842,6 @@ const ( IPPROTO_AH = 0x33 IPPROTO_CARP = 0x70 IPPROTO_DIVERT = 0x102 - IPPROTO_DIVERT_INIT = 0x2 - IPPROTO_DIVERT_RESP = 0x1 IPPROTO_DONE = 0x101 IPPROTO_DSTOPTS = 0x3c IPPROTO_EGP = 0x8 @@ -761,9 +873,11 @@ const ( IPPROTO_RAW = 0xff 
IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -786,6 +900,7 @@ const ( IPV6_LEAVE_GROUP = 0xd IPV6_MAXHLIM = 0xff IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 IPV6_MMTU = 0x500 IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 @@ -825,12 +940,12 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 - IP_DIVERTFL = 0x1022 IP_DROP_MEMBERSHIP = 0xd IP_ESP_NETWORK_LEVEL = 0x16 IP_ESP_TRANS_LEVEL = 0x15 IP_HDRINCL = 0x2 IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 IP_IPSECFLOWINFO = 0x24 IP_IPSEC_LOCAL_AUTH = 0x1b IP_IPSEC_LOCAL_CRED = 0x19 @@ -864,10 +979,15 @@ const ( IP_RETOPTS = 0x8 IP_RF = 0x8000 IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 IP_TOS = 0x3 IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -922,6 +1042,7 @@ const ( MNT_NOATIME = 0x8000 MNT_NODEV = 0x10 MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 MNT_NOSUID = 0x8 MNT_NOWAIT = 0x2 MNT_QUOTA = 0x2000 @@ -929,12 +1050,27 @@ const ( MNT_RELOAD = 0x40000 MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE = 0x10000 MNT_VISFLAGMASK = 0x400ffff MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -947,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -954,12 +1091,16 @@ const ( NET_RT_DUMP = 0x1 NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 - NET_RT_MAXID = 0x6 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 NOTE_CHILD = 0x4 NOTE_DELETE = 0x1 NOTE_EOF = 0x2 @@ -969,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -978,11 +1120,13 @@ const ( NOTE_TRUNCATE = 0x80 NOTE_WRITE = 0x2 OCRNL = 0x10 + OLCUC = 0x20 ONLCR = 0x2 ONLRET = 0x80 ONOCR = 0x40 ONOEOT = 0x8 OPOST = 0x1 + OXTABS = 0x4 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x40 @@ -1027,19 +1171,25 @@ const ( RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb RTAX_BRD = 0x7 + RTAX_DNS = 0xc RTAX_DST = 0x0 RTAX_GATEWAY = 0x1 RTAX_GENMASK = 0x3 RTAX_IFA = 0x5 RTAX_IFP = 0x4 RTAX_LABEL = 0xa - RTAX_MAX = 0xb + RTAX_MAX = 0xf RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe RTAX_SRC = 0x8 RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 RTA_BRD = 0x80 + RTA_DNS = 0x1000 RTA_DST = 0x1 RTA_GATEWAY = 0x2 RTA_GENMASK = 0x8 @@ -1047,24 +1197,29 @@ const ( RTA_IFP = 0x10 RTA_LABEL = 0x400 RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 RTA_SRC = 0x100 RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 RTF_BLACKHOLE = 0x1000 RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 RTF_CLONED = 0x10000 RTF_CLONING = 0x100 + 
RTF_CONNECTED = 0x800000 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 - RTF_FMASK = 0x70f808 + RTF_FMASK = 0x110fc08 RTF_GATEWAY = 0x2 RTF_HOST = 0x4 RTF_LLINFO = 0x400 RTF_LOCAL = 0x200000 - RTF_MASK = 0x80 RTF_MODIFIED = 0x20 RTF_MPATH = 0x40000 RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 RTF_PERMANENT_ARP = 0x2000 RTF_PROTO1 = 0x8000 RTF_PROTO2 = 0x4000 @@ -1073,23 +1228,26 @@ const ( RTF_STATIC = 0x800 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 - RTF_XRESOLVE = 0x200 + RTM_80211INFO = 0x15 RTM_ADD = 0x1 + RTM_BFD = 0x12 RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 RTM_DELADDR = 0xd RTM_DELETE = 0x2 RTM_DESYNC = 0x10 RTM_GET = 0x4 RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe - RTM_LOCK = 0x8 + RTM_INVALIDATE = 0x11 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1099,67 +1257,74 @@ const ( RTV_RTTVAR = 0x80 RTV_SPIPE = 0x10 RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff RT_TABLEID_MAX = 0xff RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 SIOCADDMULTI = 0x80206931 SIOCAIFADDR = 0x8040691a SIOCAIFGROUP = 0x80246987 - SIOCALIFADDR = 0x8218691c SIOCATMARK = 0x40047307 - SIOCBRDGADD = 0x8054693c - SIOCBRDGADDS = 0x80546941 - SIOCBRDGARL = 0x806e694d + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d SIOCBRDGDADDR = 0x81286947 - SIOCBRDGDEL = 0x8054693d - SIOCBRDGDELS = 0x80546942 - SIOCBRDGFLUSH = 0x80546948 - SIOCBRDGFRL = 0x806e694e + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e SIOCBRDGGCACHE = 0xc0146941 SIOCBRDGGFD = 0xc0146952 SIOCBRDGGHT = 0xc0146951 - SIOCBRDGGIFFLGS = 0xc054693e + SIOCBRDGGIFFLGS = 0xc060693e SIOCBRDGGMA = 0xc0146953 - SIOCBRDGGPARAM = 0xc03c6958 + SIOCBRDGGPARAM = 0xc0406958 SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc028694f - SIOCBRDGGSIFS = 0xc054693c SIOCBRDGGTO = 0xc0146946 - SIOCBRDGIFS = 0xc0546942 + SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0186943 SIOCBRDGSADDR = 0xc1286944 SIOCBRDGSCACHE = 0x80146940 SIOCBRDGSFD = 0x80146952 SIOCBRDGSHT = 0x80146951 - SIOCBRDGSIFCOST = 0x80546955 - SIOCBRDGSIFFLGS = 0x8054693f - SIOCBRDGSIFPRIO = 0x80546954 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a SIOCBRDGSMA = 0x80146953 SIOCBRDGSPRI = 0x80146950 SIOCBRDGSPROTO = 0x8014695a SIOCBRDGSTO = 0x80146945 SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80246989 + SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 - SIOCDLIFADDR = 0x8218691e + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae SIOCGETPFLOW = 0xc02069fe SIOCGETPFSYNC = 0xc02069f8 SIOCGETSGCNT = 0xc0147534 SIOCGETVIFCNT = 0xc0147533 SIOCGETVLAN = 0xc0206990 - SIOCGHIWAT = 0x40047301 SIOCGIFADDR = 0xc0206921 - SIOCGIFASYNCMAP = 0xc020697c SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCONF = 0xc0086924 SIOCGIFDATA = 0xc020691b @@ -1168,41 +1333,53 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGATTR = 0xc024698b SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc024698d SIOCGIFGMEMB = 0xc024698a SIOCGIFGROUP = 0xc0246988 SIOCGIFHARDMTU = 
0xc02069a5 - SIOCGIFMEDIA = 0xc0286936 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0386938 SIOCGIFMETRIC = 0xc0206917 SIOCGIFMTU = 0xc020697e SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 SIOCGIFPRIORITY = 0xc020699c - SIOCGIFPSRCADDR = 0xc0206947 SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 SIOCGIFRXR = 0x802069aa - SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFSFFPAGE = 0xc1126939 SIOCGIFXFLAGS = 0xc020699e - SIOCGLIFADDR = 0xc218691d SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 - SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac SIOCIFCREATE = 0x8020697a SIOCIFDESTROY = 0x80206979 SIOCIFGCLONERS = 0xc00c6978 SIOCSETKALIVE = 0x801869a3 SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad SIOCSETPFLOW = 0x802069fd SIOCSETPFSYNC = 0x802069f7 SIOCSETVLAN = 0x8020698f - SIOCSHIWAT = 0x80047300 SIOCSIFADDR = 0x8020690c - SIOCSIFASYNCMAP = 0x8020697d SIOCSIFBRDADDR = 0x80206913 SIOCSIFDESCR = 0x80206980 SIOCSIFDSTADDR = 0x8020690e @@ -1210,26 +1387,36 @@ const ( SIOCSIFGATTR = 0x8024698c SIOCSIFGENERIC = 0x80206939 SIOCSIFLLADDR = 0x8020691f - SIOCSIFMEDIA = 0xc0206935 + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 SIOCSIFMETRIC = 0x80206918 SIOCSIFMTU = 0x8020697f SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 SIOCSIFPRIORITY = 0x8020699b SIOCSIFRDOMAIN = 0x8020699f SIOCSIFRTLABEL = 0x80206982 - SIOCSIFTIMESLOT = 0x80206985 SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 - SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 SOCK_NONBLOCK = 0x4000 SOCK_RAW = 0x3 SOCK_RDM = 0x4 @@ -1241,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1248,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1261,6 +1450,7 @@ const ( SO_TIMESTAMP = 0x800 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1290,9 +1480,24 @@ const ( S_IXOTH = 0x1 S_IXUSR = 0x40 TCIFLUSH = 0x1 + TCIOFF = 0x3 TCIOFLUSH = 0x3 + TCION = 0x4 TCOFLUSH = 0x2 - TCP_MAXBURST = 0x4 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 
0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1301,11 +1506,15 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 - TCP_NSTATES = 0xb + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d TIOCCONS = 0x80047462 TIOCDRAIN = 0x2000745e TIOCEXCL = 0x2000740d @@ -1321,7 +1530,7 @@ const ( TIOCGFLAGS = 0x4004745d TIOCGPGRP = 0x40047477 TIOCGSID = 0x40047463 - TIOCGTSTAMP = 0x400c745b + TIOCGTSTAMP = 0x4010745b TIOCGWINSZ = 0x40087468 TIOCMBIC = 0x8004746b TIOCMBIS = 0x8004746c @@ -1360,17 +1569,21 @@ const ( TIOCSETAF = 0x802c7416 TIOCSETAW = 0x802c7415 TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c TIOCSFLAGS = 0x8004745c TIOCSIG = 0x8004745f TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e - TIOCSTAT = 0x80047465 - TIOCSTI = 0x80017472 + TIOCSTAT = 0x20007465 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1381,6 +1594,19 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc @@ -1394,6 +1620,7 @@ const ( WCOREFLAG = 0x80 WNOHANG = 0x1 WUNTRACED = 0x2 + XCASE = 0x1000000 ) // Errors @@ -1407,6 +1634,7 @@ const ( EALREADY = syscall.Errno(0x25) EAUTH = syscall.Errno(0x50) EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) EBADRPC = syscall.Errno(0x48) EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x58) @@ -1433,7 +1661,7 @@ const ( EIPSEC = syscall.Errno(0x52) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x5b) + ELAST = syscall.Errno(0x5f) ELOOP = syscall.Errno(0x3e) EMEDIUMTYPE = syscall.Errno(0x56) EMFILE = syscall.Errno(0x18) @@ -1461,12 +1689,14 @@ const ( ENOTCONN = syscall.Errno(0x39) ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) ENOTSOCK = syscall.Errno(0x26) ENOTSUP = syscall.Errno(0x5b) ENOTTY = syscall.Errno(0x19) ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x2d) EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x2e) EPIPE = syscall.Errno(0x20) @@ -1474,6 +1704,7 @@ const ( EPROCUNAVAIL = syscall.Errno(0x4c) EPROGMISMATCH = syscall.Errno(0x4b) EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) EPROTONOSUPPORT = syscall.Errno(0x2b) EPROTOTYPE = syscall.Errno(0x29) ERANGE = syscall.Errno(0x22) @@ -1570,7 +1801,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {35, "EAGAIN", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1626,7 +1857,11 @@ var errorList = [...]struct { {88, "ECANCELED", "operation canceled"}, {89, "EIDRM", "identifier removed"}, {90, "ENOMSG", "no message of 
desired type"}, - {91, "ELAST", "not supported"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, } // Signal table @@ -1640,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1667,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index 90de7dfc3..ae16fe754 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -112,6 +112,12 @@ const ( BPF_FILDROP_CAPTURE = 0x1 BPF_FILDROP_DROP = 0x2 BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -140,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -180,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -243,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -295,6 
+362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -326,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -354,15 +423,16 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 @@ -445,10 +515,11 @@ const ( ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x8 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 @@ -470,7 +541,7 @@ const ( EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -736,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -801,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -910,6 +984,9 @@ const ( IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 @@ -981,6 +1058,19 @@ const ( MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -993,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -1001,7 +1092,8 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_IFNAMES = 0x6 - NET_RT_MAXID = 0x7 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 @@ -1018,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -1154,7 +1247,7 @@ const ( RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1172,6 +1265,9 @@ const ( RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1188,30 +1284,30 @@ const ( SIOCBRDGDELS = 0x80606942 SIOCBRDGFLUSH = 0x80606948 SIOCBRDGFRL = 0x808c694e - SIOCBRDGGCACHE = 0xc0186941 - SIOCBRDGGFD = 0xc0186952 - SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 
SIOCBRDGGIFFLGS = 0xc060693e - SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGTO = 0xc0186946 + SIOCBRDGGTO = 0xc0146946 SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80186940 - SIOCBRDGSFD = 0x80186952 - SIOCBRDGSHT = 0x80186951 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 SIOCBRDGSIFCOST = 0x80606955 SIOCBRDGSIFFLGS = 0x8060693f SIOCBRDGSIFPRIO = 0x80606954 SIOCBRDGSIFPROT = 0x8060694a - SIOCBRDGSMA = 0x80186953 - SIOCBRDGSPRI = 0x80186950 - SIOCBRDGSPROTO = 0x8018695a - SIOCBRDGSTO = 0x80186945 - SIOCBRDGSTXHC = 0x80186959 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 @@ -1264,6 +1360,7 @@ const ( SIOCGPWE3CTRLWORD = 0xc02069dc SIOCGPWE3FAT = 0xc02069dd SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 SIOCGTXHPRIO = 0xc02069c6 SIOCGUMBINFO = 0xc02069be @@ -1310,17 +1407,13 @@ const ( SIOCSPWE3CTRLWORD = 0x802069dc SIOCSPWE3FAT = 0x802069dd SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 SIOCSTXHPRIO = 0x802069c5 SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 - SIOCSWGDPID = 0xc018695b - SIOCSWGMAXFLOW = 0xc0186960 - SIOCSWGMAXGROUP = 0xc018695d - SIOCSWSDPID = 0x8018695c - SIOCSWSPORTNO = 0xc060695f SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 SOCK_DNS = 0x1000 @@ -1335,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1342,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1391,7 +1486,18 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 - TCP_MAXBURST = 0x4 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1400,6 +1506,7 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 TIMER_ABSTIME = 0x1 @@ -1768,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1795,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index f1154ff56..03d90fe35 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -112,6 +112,12 @@ const ( BPF_FILDROP_CAPTURE = 0x1 BPF_FILDROP_DROP = 0x2 BPF_FILDROP_PASS = 0x0 + 
BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -140,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -301,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -353,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -413,15 +423,16 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 @@ -504,10 +515,11 @@ const ( ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x8 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 @@ -529,7 +541,7 @@ const ( EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -795,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -860,6 +873,7 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -970,6 +984,9 @@ const ( IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 @@ -1041,6 +1058,19 @@ const ( MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -1053,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -1061,7 +1092,8 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_IFNAMES = 0x6 - NET_RT_MAXID = 0x7 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 @@ -1078,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -1214,7 +1247,7 @@ const ( RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1232,6 +1265,9 @@ const ( RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1248,30 +1284,30 @@ const ( SIOCBRDGDELS 
= 0x80606942 SIOCBRDGFLUSH = 0x80606948 SIOCBRDGFRL = 0x808c694e - SIOCBRDGGCACHE = 0xc0186941 - SIOCBRDGGFD = 0xc0186952 - SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 SIOCBRDGGIFFLGS = 0xc060693e - SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGTO = 0xc0186946 + SIOCBRDGGTO = 0xc0146946 SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80186940 - SIOCBRDGSFD = 0x80186952 - SIOCBRDGSHT = 0x80186951 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 SIOCBRDGSIFCOST = 0x80606955 SIOCBRDGSIFFLGS = 0x8060693f SIOCBRDGSIFPRIO = 0x80606954 SIOCBRDGSIFPROT = 0x8060694a - SIOCBRDGSMA = 0x80186953 - SIOCBRDGSPRI = 0x80186950 - SIOCBRDGSPROTO = 0x8018695a - SIOCBRDGSTO = 0x80186945 - SIOCBRDGSTXHC = 0x80186959 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 @@ -1378,11 +1414,6 @@ const ( SIOCSVH = 0xc02069f5 SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 - SIOCSWGDPID = 0xc018695b - SIOCSWGMAXFLOW = 0xc0186960 - SIOCSWGMAXGROUP = 0xc018695d - SIOCSWSDPID = 0x8018695c - SIOCSWSPORTNO = 0xc060695f SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 SOCK_DNS = 0x1000 @@ -1455,7 +1486,18 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 - TCP_MAXBURST = 0x4 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1833,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1860,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {81920, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 1b6eedfa6..54749f9c5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -552,6 +552,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 039c4aa06..77479d458 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -544,6 +544,16 @@ 
func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 0535d3cfd..2e966d4d7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 1018b5221..d65a7c0fa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 3802f4b37..6f0b97c6d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 8a2db7da9..e1c23b527 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := 
Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 293cf3680..36ea3a55b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -537,6 +537,17 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockAdjtime(clockid int32, buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_CLOCK_ADJTIME, uintptr(clockid), uintptr(unsafe.Pointer(buf)), 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGetres(clockid int32, res *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 4af561a48..79f738996 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -521,6 +521,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 3b90e9448..fb161f3a2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -521,6 +521,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 890f4ccd1..4c8ac993a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -521,6 +521,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index c79f071fc..76dd8ec4f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -521,6 +521,16 @@ func Chroot(path string) (err error) { // THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 2925fe0a7..caeb807bd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 75eb2f5f3..087444250 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -5,792 +5,665 @@ TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL 
·libc_getpeername_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4 DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4 DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4 DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL 
·libc_munlockall_trampoline_addr(SB), RODATA, $4 DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe2(SB) - GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4 DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdents(SB) - GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) - GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4 DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_clock_gettime_trampoline_addr(SB)/4, $libc_clock_gettime_trampoline<>(SB) + TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup3(SB) - GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 DATA ·libc_dup3_trampoline_addr(SB)/4, 
$libc_dup3_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4 DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) TEXT 
libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrtable(SB) - GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) TEXT 
libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifoat(SB) - GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknodat(SB) - GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) - GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4 DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4 DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL 
·libc_revoke_trampoline_addr(SB), RODATA, $4 DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4 DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresgid(SB) - GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresuid(SB) - GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) - GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) - GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) TEXT 
libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4 DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 98446d2b9..a05e5f4ff 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var 
libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 243a6663c..5782cd108 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -5,792 +5,665 @@ TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA 
·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe2(SB) - GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdents(SB) - GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT 
libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) - GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup3(SB) - GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), 
RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA 
·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrtable(SB) - GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifoat(SB) - GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, 
$libc_mknod_trampoline<>(SB) TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknodat(SB) - GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) - GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT 
libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresgid(SB) - GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresuid(SB) - GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) - GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) - GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, 
$libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 8da6791d1..b2da8e50c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index 9ad116d9f..cf310420c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -5,792 +5,665 @@ TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 DATA 
·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4 DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4 DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), 
RODATA, $4 DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4 DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe2(SB) - GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4 DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdents(SB) - GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) - GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4 DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) 
TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_clock_gettime_trampoline_addr(SB)/4, $libc_clock_gettime_trampoline<>(SB) + TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup3(SB) - GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL 
·libc_fstatat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4 DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrtable(SB) - GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) TEXT 
libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifoat(SB) - GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknodat(SB) - GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) - GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), 
RODATA, $4 DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4 DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4 DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4 DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 DATA 
·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresgid(SB) - GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresuid(SB) - GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) - GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) - GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4 DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 DATA 
·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB)
 
 TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_mmap(SB)
-
 GLOBL	·libc_mmap_trampoline_addr(SB), RODATA, $4
 DATA	·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB)
 
 TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_munmap(SB)
-
 GLOBL	·libc_munmap_trampoline_addr(SB), RODATA, $4
 DATA	·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB)
 
 TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
-
 GLOBL	·libc_utimensat_trampoline_addr(SB), RODATA, $4
 DATA	·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB)
 
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
index 800aab6e3..048b2655e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
@@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+	_, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_clock_gettime_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Close(fd int) (err error) {
 	_, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
index 4efeff9ab..484bb42e0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
@@ -5,792 +5,665 @@
 TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getgroups(SB)
-
 GLOBL	·libc_getgroups_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB)
 
 TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setgroups(SB)
-
 GLOBL	·libc_setgroups_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB)
 
 TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_wait4(SB)
-
 GLOBL	·libc_wait4_trampoline_addr(SB), RODATA, $8
 DATA	·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB)
 
 TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_accept(SB)
-
 GLOBL	·libc_accept_trampoline_addr(SB), RODATA, $8
 DATA	·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB)
 
 TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_bind(SB)
-
 GLOBL	·libc_bind_trampoline_addr(SB), RODATA, $8
 DATA	·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB)
 
 TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_connect(SB)
-
 GLOBL	·libc_connect_trampoline_addr(SB), RODATA, $8
 DATA	·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB)
 
 TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_socket(SB)
-
 GLOBL	·libc_socket_trampoline_addr(SB), RODATA, $8
 DATA	·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB)
 
 TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockopt(SB)
-
 GLOBL	·libc_getsockopt_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB)
 
 TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setsockopt(SB)
-
 GLOBL
·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) 
- GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe2(SB) - GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdents(SB) - GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) - GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA 
·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup3(SB) - GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, 
$libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrtable(SB) - GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, 
$libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifoat(SB) - GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknodat(SB) - GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) - GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 
JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresgid(SB) - GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresuid(SB) - GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) - GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) - GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT 
libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 016d959bc..6f33e37e7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go +// go run mksyscall.go -openbsd -libc 
-tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build openbsd && mips64
@@ -16,7 +16,7 @@ var _ syscall.Errno
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
-	r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+	r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
 	return
 }
 
+var libc_getgroups_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getgroups getgroups "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func setgroups(ngid int, gid *_Gid_t) (err error) {
-	_, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+	_, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_setgroups_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setgroups setgroups "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
-	r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+	r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
 	wpid = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err
 	return
 }
 
+var libc_wait4_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_wait4 wait4 "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
-	r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
 	fd = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
 	return
 }
 
+var libc_accept_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_accept accept "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
-	_, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
+	_, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_bind_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_bind bind "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
-	_, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
+	_, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_connect_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_connect connect "libc.so"
+
 // THIS FILE IS
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := 
syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), 
uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 
+396,17 @@ func Mprotect(b []byte, prot int) (err error) {
 	} else {
 		_p0 = unsafe.Pointer(&_zero)
 	}
-	_, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
+	_, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_mprotect_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mprotect mprotect "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func Msync(b []byte, flags int) (err error) {
@@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) {
 	} else {
 		_p0 = unsafe.Pointer(&_zero)
 	}
-	_, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
+	_, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_msync_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_msync msync "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func Munlock(b []byte) (err error) {
@@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) {
 	} else {
 		_p0 = unsafe.Pointer(&_zero)
 	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	_, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_munlock_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_munlock munlock "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func Munlockall() (err error) {
-	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
+	_, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_munlockall_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_munlockall munlockall "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func pipe2(p *[2]_C_int, flags int) (err error) {
-	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+	_, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_pipe2_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func Getdents(fd int, buf []byte) (n int, err error) {
@@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) {
 	} else {
 		_p0 = unsafe.Pointer(&_zero)
 	}
-	r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+	r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) {
 	return
 }
 
+var libc_getdents_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getdents getdents "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func Getcwd(buf []byte) (n int, err error) {
@@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) {
 	} else {
 		_p0 = unsafe.Pointer(&_zero)
 	}
-	r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+	r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0)
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) {
 	return
 }
 
+var libc_getcwd_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getcwd getcwd "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func ioctl(fd int, req uint, arg uintptr) (err error) {
-	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_ioctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
@@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr)
 	} else {
 		_p0 = unsafe.Pointer(&_zero)
 	}
-	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	_, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_sysctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
-	r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
+	r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int,
 	return
 }
 
+var libc_ppoll_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ppoll ppoll "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func Access(path string, mode uint32) (err error) {
@@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) {
 	if err != nil {
 		return
 	}
-	_, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+	_, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_access_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_access access "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
-	_, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
+	_, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_adjtime_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_adjtime adjtime "libc.so"
+
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func Chdir(path string) (err error) {
@@ -463,13 +607,17 @@ func Chdir(path string) (err error) {
 	if err != nil {
 		return
 	}
-	_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+	_, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+var libc_chdir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_chdir chdir
"libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,49 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +733,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), 
uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +784,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +845,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +878,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +914,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +940,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1040,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, 
uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1091,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1120,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1149,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := 
Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1216,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1228,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1252,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1276,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1309,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1328,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1347,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1366,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1385,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1404,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1423,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1456,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1464,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1476,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1484,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1496,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,6 +1504,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1135,7 +1517,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,6 +1525,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, offset int64) (n int, err 
error) { @@ -1152,7 +1538,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1546,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1559,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1567,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1585,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1593,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1611,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1619,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1636,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1660,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err 
error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1679,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1698,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1720,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1735,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1789,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } 
+var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1923,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1963,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1982,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +2006,17 @@ 
func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2030,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2063,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2094,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2113,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2132,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2152,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2160,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2175,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2207,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2223,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s new file mode 100644 index 000000000..55af27263 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -0,0 +1,669 @@ +// go run mkasm.go openbsd mips64 +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL 
·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL 
·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT 
libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, 
$libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index c85de2d97..330cf7f7a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 7c9223b64..4028255b0 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -249,6 +249,12 @@ TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_clock_gettime(SB) + RET +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_close(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 8e3e7873f..5f24de0d9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index 7dba78927..e1fbd4dfa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -5,792 +5,665 @@ TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL 
·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) 
- GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe2(SB) - GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdents(SB) - GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) - GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA 
·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup3(SB) - GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, 
$libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrtable(SB) - GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, 
$libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifoat(SB) - GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknodat(SB) - GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) - GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 
JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresgid(SB) - GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresuid(SB) - GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) - GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) - GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT 
libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 91f5a2bde..78d4a4240 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -38,6 +38,7 @@ import ( //go:cgo_import_dynamic libc_chmod chmod "libc.so" //go:cgo_import_dynamic libc_chown chown "libc.so" //go:cgo_import_dynamic libc_chroot chroot 
"libc.so" +//go:cgo_import_dynamic libc_clockgettime clockgettime "libc.so" //go:cgo_import_dynamic libc_close close "libc.so" //go:cgo_import_dynamic libc_creat creat "libc.so" //go:cgo_import_dynamic libc_dup dup "libc.so" @@ -177,6 +178,7 @@ import ( //go:linkname procChmod libc_chmod //go:linkname procChown libc_chown //go:linkname procChroot libc_chroot +//go:linkname procClockGettime libc_clockgettime //go:linkname procClose libc_close //go:linkname procCreat libc_creat //go:linkname procDup libc_dup @@ -317,6 +319,7 @@ var ( procChmod, procChown, procChroot, + procClockGettime, procClose, procCreat, procDup, @@ -750,6 +753,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClockGettime)), 2, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 9e9d0b2a9..55e048471 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -17,6 +17,7 @@ var sysctlMib = []mibentry{ {"ddb.max_line", []_C_int{9, 3}}, {"ddb.max_width", []_C_int{9, 2}}, {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, {"ddb.radix", []_C_int{9, 1}}, {"ddb.tab_stop_width", []_C_int{9, 4}}, {"ddb.trigger", []_C_int{9, 8}}, @@ -33,29 +34,37 @@ var sysctlMib = []mibentry{ {"hw.ncpufound", []_C_int{6, 21}}, {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, - {"kern.arandom", []_C_int{1, 37}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.cpustats", []_C_int{1, 85}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, {"kern.fscale", []_C_int{1, 46}}, {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, {"kern.hostid", []_C_int{1, 11}}, {"kern.hostname", []_C_int{1, 10}}, {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, @@ -78,17 +87,16 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", 
[]_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, - {"kern.random", []_C_int{1, 31}}, {"kern.rawpartition", []_C_int{1, 24}}, {"kern.saved_ids", []_C_int{1, 20}}, {"kern.securelevel", []_C_int{1, 9}}, @@ -106,21 +114,20 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, - {"kern.tty.maxptys", []_C_int{1, 44, 6}}, - {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, - {"kern.userasymcrypto", []_C_int{1, 60}}, - {"kern.usercrypto", []_C_int{1, 52}}, - {"kern.usermount", []_C_int{1, 30}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, - {"kern.vnode", []_C_int{1, 13}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, @@ -148,7 +155,9 @@ var sysctlMib = []mibentry{ {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, @@ -157,8 +166,10 @@ var sysctlMib = []mibentry{ {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, @@ -175,9 +186,7 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, - {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, @@ -191,6 +200,7 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, @@ -198,9 +208,12 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.stats", []_C_int{4, 2, 
6, 21}}, {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, @@ -213,13 +226,8 @@ var sysctlMib = []mibentry{ {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, - {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, - {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, - {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, - {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, - {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, @@ -232,20 +240,19 @@ var sysctlMib = []mibentry{ {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, - {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, - {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, - {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, - {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, {"net.key.sadb_dump", []_C_int{4, 30, 1}}, {"net.key.spd_dump", []_C_int{4, 30, 2}}, {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, @@ -254,12 +261,12 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", []_C_int{2, 3}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index adecd0966..d2243cf83 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ 
b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -36,23 +36,29 @@ var sysctlMib = []mibentry{ {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.dnsjackport", []_C_int{1, 13}}, + {"kern.cpustats", []_C_int{1, 85}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, @@ -81,13 +87,13 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, @@ -108,15 +114,19 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, @@ -176,7 +186,6 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, @@ -252,12 +261,12 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", []_C_int{2, 3}}, diff 
--git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 8ea52a4a1..82dc51bd8 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -17,6 +17,7 @@ var sysctlMib = []mibentry{ {"ddb.max_line", []_C_int{9, 3}}, {"ddb.max_width", []_C_int{9, 2}}, {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, {"ddb.radix", []_C_int{9, 1}}, {"ddb.tab_stop_width", []_C_int{9, 4}}, {"ddb.trigger", []_C_int{9, 8}}, @@ -33,29 +34,37 @@ var sysctlMib = []mibentry{ {"hw.ncpufound", []_C_int{6, 21}}, {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, - {"kern.arandom", []_C_int{1, 37}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.cpustats", []_C_int{1, 85}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, {"kern.fscale", []_C_int{1, 46}}, {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, {"kern.hostid", []_C_int{1, 11}}, {"kern.hostname", []_C_int{1, 10}}, {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, @@ -78,17 +87,16 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, - {"kern.random", []_C_int{1, 31}}, {"kern.rawpartition", []_C_int{1, 24}}, {"kern.saved_ids", []_C_int{1, 20}}, {"kern.securelevel", []_C_int{1, 9}}, @@ -106,21 +114,20 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, - {"kern.tty.maxptys", []_C_int{1, 44, 6}}, - {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, - {"kern.userasymcrypto", []_C_int{1, 60}}, - {"kern.usercrypto", []_C_int{1, 52}}, - {"kern.usermount", []_C_int{1, 30}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, - {"kern.vnode", []_C_int{1, 13}}, + {"kern.video", []_C_int{1, 
89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, @@ -148,7 +155,9 @@ var sysctlMib = []mibentry{ {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, @@ -157,8 +166,10 @@ var sysctlMib = []mibentry{ {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, @@ -175,9 +186,7 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, - {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, @@ -191,6 +200,7 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, @@ -198,9 +208,12 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, @@ -213,13 +226,8 @@ var sysctlMib = []mibentry{ {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, - {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, - {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, - {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, - {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, - 
{"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, @@ -232,20 +240,19 @@ var sysctlMib = []mibentry{ {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, - {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, - {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, - {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, - {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, {"net.key.sadb_dump", []_C_int{4, 30, 1}}, {"net.key.spd_dump", []_C_int{4, 30, 2}}, {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, @@ -254,12 +261,12 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", []_C_int{2, 3}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index 154b57ae3..cbdda1a4a 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -36,6 +36,7 @@ var sysctlMib = []mibentry{ {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, @@ -44,6 +45,7 @@ var sysctlMib = []mibentry{ {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, {"kern.audio", []_C_int{1, 84}}, @@ -51,6 +53,8 @@ var sysctlMib = []mibentry{ {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, @@ -83,13 +87,13 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 
58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, @@ -110,13 +114,16 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, {"kern.witnesswatch", []_C_int{1, 53}}, @@ -179,7 +186,6 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, @@ -255,7 +261,6 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index d96bb2ba4..f55eae1a8 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -36,6 +36,7 @@ var sysctlMib = []mibentry{ {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, @@ -86,7 +87,6 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, @@ -123,6 +123,7 @@ var sysctlMib = []mibentry{ {"kern.ttycount", []_C_int{1, 57}}, {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, {"kern.witnesswatch", []_C_int{1, 53}}, diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index a37f77375..01c43a01f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. 
const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index ff6881167..7d9fc8f1c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -29,6 +29,41 @@ type Itimerval struct { Value Timeval } +const ( + ADJ_OFFSET = 0x1 + ADJ_FREQUENCY = 0x2 + ADJ_MAXERROR = 0x4 + ADJ_ESTERROR = 0x8 + ADJ_STATUS = 0x10 + ADJ_TIMECONST = 0x20 + ADJ_TAI = 0x80 + ADJ_SETOFFSET = 0x100 + ADJ_MICRO = 0x1000 + ADJ_NANO = 0x2000 + ADJ_TICK = 0x4000 + ADJ_OFFSET_SINGLESHOT = 0x8001 + ADJ_OFFSET_SS_READ = 0xa001 +) + +const ( + STA_PLL = 0x1 + STA_PPSFREQ = 0x2 + STA_PPSTIME = 0x4 + STA_FLL = 0x8 + STA_INS = 0x10 + STA_DEL = 0x20 + STA_UNSYNC = 0x40 + STA_FREQHOLD = 0x80 + STA_PPSSIGNAL = 0x100 + STA_PPSJITTER = 0x200 + STA_PPSWANDER = 0x400 + STA_PPSERROR = 0x800 + STA_CLOCKERR = 0x1000 + STA_NANO = 0x2000 + STA_MODE = 0x4000 + STA_CLK = 0x8000 +) + const ( TIME_OK = 0x0 TIME_INS = 0x1 @@ -53,29 +88,30 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - _ uint64 - _ [12]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + _ [12]uint64 } type Fsid struct { @@ -1099,7 +1135,8 @@ const ( PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 0xf PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 - PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x12 + PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 0x12 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x13 PERF_SAMPLE_BRANCH_USER = 0x1 PERF_SAMPLE_BRANCH_KERNEL = 0x2 PERF_SAMPLE_BRANCH_HV = 0x4 @@ -1118,7 +1155,8 @@ const ( PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 - PERF_SAMPLE_BRANCH_MAX = 0x40000 + PERF_SAMPLE_BRANCH_PRIV_SAVE = 0x40000 + PERF_SAMPLE_BRANCH_MAX = 0x80000 PERF_BR_UNKNOWN = 0x0 PERF_BR_COND = 0x1 PERF_BR_UNCOND = 0x2 @@ -1132,7 +1170,10 @@ const ( PERF_BR_COND_RET = 0xa PERF_BR_ERET = 0xb PERF_BR_IRQ = 0xc - PERF_BR_MAX = 0xd + PERF_BR_SERROR = 0xd + PERF_BR_NO_TX = 0xe + PERF_BR_EXTEND_ABI = 0xf + PERF_BR_MAX = 0x10 PERF_SAMPLE_REGS_ABI_NONE = 0x0 PERF_SAMPLE_REGS_ABI_32 = 0x1 PERF_SAMPLE_REGS_ABI_64 = 0x2 @@ -1151,7 +1192,8 @@ const ( PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_FORMAT_MAX = 0x10 + PERF_FORMAT_LOST = 0x10 + PERF_FORMAT_MAX = 0x20 PERF_IOC_FLAG_GROUP = 0x1 PERF_RECORD_MMAP = 0x1 PERF_RECORD_LOST = 0x2 @@ -2979,7 +3021,16 @@ const ( DEVLINK_CMD_TRAP_POLICER_NEW = 0x47 DEVLINK_CMD_TRAP_POLICER_DEL = 0x48 DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49 - DEVLINK_CMD_MAX = 0x51 + DEVLINK_CMD_RATE_GET = 0x4a + DEVLINK_CMD_RATE_SET = 0x4b + DEVLINK_CMD_RATE_NEW = 0x4c + DEVLINK_CMD_RATE_DEL = 0x4d + 
DEVLINK_CMD_LINECARD_GET = 0x4e + DEVLINK_CMD_LINECARD_SET = 0x4f + DEVLINK_CMD_LINECARD_NEW = 0x50 + DEVLINK_CMD_LINECARD_DEL = 0x51 + DEVLINK_CMD_SELFTESTS_GET = 0x52 + DEVLINK_CMD_MAX = 0x53 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -3208,7 +3259,13 @@ const ( DEVLINK_ATTR_RATE_NODE_NAME = 0xa8 DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 0xa9 DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 0xaa - DEVLINK_ATTR_MAX = 0xae + DEVLINK_ATTR_LINECARD_INDEX = 0xab + DEVLINK_ATTR_LINECARD_STATE = 0xac + DEVLINK_ATTR_LINECARD_TYPE = 0xad + DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES = 0xae + DEVLINK_ATTR_NESTED_DEVLINK = 0xaf + DEVLINK_ATTR_SELFTESTS = 0xb0 + DEVLINK_ATTR_MAX = 0xb0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3317,7 +3374,8 @@ const ( LWTUNNEL_ENCAP_SEG6_LOCAL = 0x7 LWTUNNEL_ENCAP_RPL = 0x8 LWTUNNEL_ENCAP_IOAM6 = 0x9 - LWTUNNEL_ENCAP_MAX = 0x9 + LWTUNNEL_ENCAP_XFRM = 0xa + LWTUNNEL_ENCAP_MAX = 0xa MPLS_IPTUNNEL_UNSPEC = 0x0 MPLS_IPTUNNEL_DST = 0x1 @@ -3512,7 +3570,9 @@ const ( ETHTOOL_MSG_PHC_VCLOCKS_GET = 0x21 ETHTOOL_MSG_MODULE_GET = 0x22 ETHTOOL_MSG_MODULE_SET = 0x23 - ETHTOOL_MSG_USER_MAX = 0x23 + ETHTOOL_MSG_PSE_GET = 0x24 + ETHTOOL_MSG_PSE_SET = 0x25 + ETHTOOL_MSG_USER_MAX = 0x25 ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3550,7 +3610,8 @@ const ( ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 0x22 ETHTOOL_MSG_MODULE_GET_REPLY = 0x23 ETHTOOL_MSG_MODULE_NTF = 0x24 - ETHTOOL_MSG_KERNEL_MAX = 0x24 + ETHTOOL_MSG_PSE_GET_REPLY = 0x25 + ETHTOOL_MSG_KERNEL_MAX = 0x25 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3609,7 +3670,8 @@ const ( ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 0x7 ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 0x8 ETHTOOL_A_LINKMODES_LANES = 0x9 - ETHTOOL_A_LINKMODES_MAX = 0x9 + ETHTOOL_A_LINKMODES_RATE_MATCHING = 0xa + ETHTOOL_A_LINKMODES_MAX = 0xa ETHTOOL_A_LINKSTATE_UNSPEC = 0x0 ETHTOOL_A_LINKSTATE_HEADER = 0x1 ETHTOOL_A_LINKSTATE_LINK = 0x2 @@ -4201,6 +4263,9 @@ const ( NL80211_ACL_POLICY_DENY_UNLESS_LISTED = 0x1 NL80211_AC_VI = 0x1 NL80211_AC_VO = 0x0 + NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = 0x1 + NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT = 0x2 + NL80211_AP_SME_SA_QUERY_OFFLOAD = 0x1 NL80211_ATTR_4ADDR = 0x53 NL80211_ATTR_ACK = 0x5c NL80211_ATTR_ACK_SIGNAL = 0x107 @@ -4209,6 +4274,7 @@ const ( NL80211_ATTR_AIRTIME_WEIGHT = 0x112 NL80211_ATTR_AKM_SUITES = 0x4c NL80211_ATTR_AP_ISOLATE = 0x60 + NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135 NL80211_ATTR_AUTH_DATA = 0x9c NL80211_ATTR_AUTH_TYPE = 0x35 NL80211_ATTR_BANDS = 0xef @@ -4240,6 +4306,9 @@ const ( NL80211_ATTR_COALESCE_RULE_DELAY = 0x1 NL80211_ATTR_COALESCE_RULE_MAX = 0x3 NL80211_ATTR_COALESCE_RULE_PKT_PATTERN = 0x3 + NL80211_ATTR_COLOR_CHANGE_COLOR = 0x130 + NL80211_ATTR_COLOR_CHANGE_COUNT = 0x12f + NL80211_ATTR_COLOR_CHANGE_ELEMS = 0x131 NL80211_ATTR_CONN_FAILED_REASON = 0x9b NL80211_ATTR_CONTROL_PORT = 0x44 NL80211_ATTR_CONTROL_PORT_ETHERTYPE = 0x66 @@ -4266,6 +4335,7 @@ const ( NL80211_ATTR_DEVICE_AP_SME = 0x8d NL80211_ATTR_DFS_CAC_TIME = 0x7 NL80211_ATTR_DFS_REGION = 0x92 + NL80211_ATTR_DISABLE_EHT = 0x137 NL80211_ATTR_DISABLE_HE = 0x12d NL80211_ATTR_DISABLE_HT = 0x93 NL80211_ATTR_DISABLE_VHT = 0xaf @@ -4273,6 +4343,8 @@ const ( NL80211_ATTR_DONT_WAIT_FOR_ACK = 0x8e NL80211_ATTR_DTIM_PERIOD = 0xd NL80211_ATTR_DURATION = 0x57 + NL80211_ATTR_EHT_CAPABILITY = 0x136 + 
NL80211_ATTR_EML_CAPABILITY = 0x13d NL80211_ATTR_EXT_CAPA = 0xa9 NL80211_ATTR_EXT_CAPA_MASK = 0xaa NL80211_ATTR_EXTERNAL_AUTH_ACTION = 0x104 @@ -4337,10 +4409,11 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x137 + NL80211_ATTR_MAX = 0x140 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 + NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 NL80211_ATTR_MAX_NUM_SCAN_SSIDS = 0x2b NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS = 0xde @@ -4350,6 +4423,8 @@ const ( NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL = 0xdf NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS = 0xe0 NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN = 0x7c + NL80211_ATTR_MBSSID_CONFIG = 0x132 + NL80211_ATTR_MBSSID_ELEMS = 0x133 NL80211_ATTR_MCAST_RATE = 0x6b NL80211_ATTR_MDID = 0xb1 NL80211_ATTR_MEASUREMENT_DURATION = 0xeb @@ -4359,6 +4434,11 @@ const ( NL80211_ATTR_MESH_PEER_AID = 0xed NL80211_ATTR_MESH_SETUP = 0x70 NL80211_ATTR_MGMT_SUBTYPE = 0x29 + NL80211_ATTR_MLD_ADDR = 0x13a + NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e + NL80211_ATTR_MLO_LINK_ID = 0x139 + NL80211_ATTR_MLO_LINKS = 0x138 + NL80211_ATTR_MLO_SUPPORT = 0x13b NL80211_ATTR_MNTR_FLAGS = 0x17 NL80211_ATTR_MPATH_INFO = 0x1b NL80211_ATTR_MPATH_NEXT_HOP = 0x1a @@ -4371,6 +4451,7 @@ const ( NL80211_ATTR_NETNS_FD = 0xdb NL80211_ATTR_NOACK_MAP = 0x95 NL80211_ATTR_NSS = 0x106 + NL80211_ATTR_OBSS_COLOR_BITMAP = 0x12e NL80211_ATTR_OFFCHANNEL_TX_OK = 0x6c NL80211_ATTR_OPER_CLASS = 0xd6 NL80211_ATTR_OPMODE_NOTIF = 0xc2 @@ -4397,6 +4478,7 @@ const ( NL80211_ATTR_PROTOCOL_FEATURES = 0xad NL80211_ATTR_PS_STATE = 0x5d NL80211_ATTR_QOS_MAP = 0xc7 + NL80211_ATTR_RADAR_BACKGROUND = 0x134 NL80211_ATTR_RADAR_EVENT = 0xa8 NL80211_ATTR_REASON_CODE = 0x36 NL80211_ATTR_RECEIVE_MULTICAST = 0x121 @@ -4412,6 +4494,7 @@ const ( NL80211_ATTR_RESP_IE = 0x4e NL80211_ATTR_ROAM_SUPPORT = 0x83 NL80211_ATTR_RX_FRAME_TYPES = 0x64 + NL80211_ATTR_RX_HW_TIMESTAMP = 0x140 NL80211_ATTR_RXMGMT_FLAGS = 0xbc NL80211_ATTR_RX_SIGNAL_DBM = 0x97 NL80211_ATTR_S1G_CAPABILITY = 0x128 @@ -4484,6 +4567,7 @@ const ( NL80211_ATTR_TSID = 0xd2 NL80211_ATTR_TWT_RESPONDER = 0x116 NL80211_ATTR_TX_FRAME_TYPES = 0x63 + NL80211_ATTR_TX_HW_TIMESTAMP = 0x13f NL80211_ATTR_TX_NO_CCK_RATE = 0x87 NL80211_ATTR_TXQ_LIMIT = 0x10a NL80211_ATTR_TXQ_MEMORY_LIMIT = 0x10b @@ -4557,6 +4641,10 @@ const ( NL80211_BAND_ATTR_RATES = 0x2 NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8 + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET = 0xa + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY = 0x9 + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE = 0xb NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA = 0x6 NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC = 0x2 NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET = 0x4 @@ -4564,6 +4652,8 @@ const ( NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE = 0x5 NL80211_BAND_IFTYPE_ATTR_IFTYPES = 0x1 NL80211_BAND_IFTYPE_ATTR_MAX = 0xb + NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS = 0x7 + NL80211_BAND_LC = 0x5 NL80211_BAND_S1GHZ = 0x4 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE = 0x2 NL80211_BITRATE_ATTR_MAX = 0x2 @@ -4584,7 +4674,9 @@ const ( NL80211_BSS_FREQUENCY_OFFSET = 0x14 NL80211_BSS_INFORMATION_ELEMENTS = 0x6 NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf - NL80211_BSS_MAX = 0x14 + NL80211_BSS_MAX = 0x16 + NL80211_BSS_MLD_ADDR = 0x16 + NL80211_BSS_MLO_LINK_ID = 0x15 NL80211_BSS_PAD = 0x10 NL80211_BSS_PARENT_BSSID = 0x12 NL80211_BSS_PARENT_TSF = 0x11 @@ -4612,6 +4704,7 @@ const ( NL80211_CHAN_WIDTH_20 = 0x1 
NL80211_CHAN_WIDTH_20_NOHT = 0x0 NL80211_CHAN_WIDTH_2 = 0x9 + NL80211_CHAN_WIDTH_320 = 0xd NL80211_CHAN_WIDTH_40 = 0x2 NL80211_CHAN_WIDTH_4 = 0xa NL80211_CHAN_WIDTH_5 = 0x6 @@ -4621,8 +4714,11 @@ const ( NL80211_CMD_ABORT_SCAN = 0x72 NL80211_CMD_ACTION = 0x3b NL80211_CMD_ACTION_TX_STATUS = 0x3c + NL80211_CMD_ADD_LINK = 0x94 + NL80211_CMD_ADD_LINK_STA = 0x96 NL80211_CMD_ADD_NAN_FUNCTION = 0x75 NL80211_CMD_ADD_TX_TS = 0x69 + NL80211_CMD_ASSOC_COMEBACK = 0x93 NL80211_CMD_ASSOCIATE = 0x26 NL80211_CMD_AUTHENTICATE = 0x25 NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL = 0x38 @@ -4630,6 +4726,10 @@ const ( NL80211_CMD_CHANNEL_SWITCH = 0x66 NL80211_CMD_CH_SWITCH_NOTIFY = 0x58 NL80211_CMD_CH_SWITCH_STARTED_NOTIFY = 0x6e + NL80211_CMD_COLOR_CHANGE_ABORTED = 0x90 + NL80211_CMD_COLOR_CHANGE_COMPLETED = 0x91 + NL80211_CMD_COLOR_CHANGE_REQUEST = 0x8e + NL80211_CMD_COLOR_CHANGE_STARTED = 0x8f NL80211_CMD_CONNECT = 0x2e NL80211_CMD_CONN_FAILED = 0x5b NL80211_CMD_CONTROL_PORT_FRAME = 0x81 @@ -4678,8 +4778,9 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x93 + NL80211_CMD_MAX = 0x98 NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 + NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 NL80211_CMD_NEW_BEACON = 0xf NL80211_CMD_NEW_INTERFACE = 0x7 @@ -4692,6 +4793,7 @@ const ( NL80211_CMD_NEW_WIPHY = 0x3 NL80211_CMD_NOTIFY_CQM = 0x40 NL80211_CMD_NOTIFY_RADAR = 0x86 + NL80211_CMD_OBSS_COLOR_COLLISION = 0x8d NL80211_CMD_PEER_MEASUREMENT_COMPLETE = 0x85 NL80211_CMD_PEER_MEASUREMENT_RESULT = 0x84 NL80211_CMD_PEER_MEASUREMENT_START = 0x83 @@ -4707,6 +4809,8 @@ const ( NL80211_CMD_REGISTER_FRAME = 0x3a NL80211_CMD_RELOAD_REGDB = 0x7e NL80211_CMD_REMAIN_ON_CHANNEL = 0x37 + NL80211_CMD_REMOVE_LINK = 0x95 + NL80211_CMD_REMOVE_LINK_STA = 0x98 NL80211_CMD_REQ_SET_REG = 0x1b NL80211_CMD_ROAM = 0x2f NL80211_CMD_SCAN_ABORTED = 0x23 @@ -4717,6 +4821,7 @@ const ( NL80211_CMD_SET_CHANNEL = 0x41 NL80211_CMD_SET_COALESCE = 0x65 NL80211_CMD_SET_CQM = 0x3f + NL80211_CMD_SET_FILS_AAD = 0x92 NL80211_CMD_SET_INTERFACE = 0x6 NL80211_CMD_SET_KEY = 0xa NL80211_CMD_SET_MAC_ACL = 0x5d @@ -4791,6 +4896,8 @@ const ( NL80211_EDMG_BW_CONFIG_MIN = 0x4 NL80211_EDMG_CHANNELS_MAX = 0x3c NL80211_EDMG_CHANNELS_MIN = 0x1 + NL80211_EHT_MAX_CAPABILITY_LEN = 0x33 + NL80211_EHT_MIN_CAPABILITY_LEN = 0xd NL80211_EXTERNAL_AUTH_ABORT = 0x1 NL80211_EXTERNAL_AUTH_START = 0x0 NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK = 0x32 @@ -4807,6 +4914,7 @@ const ( NL80211_EXT_FEATURE_BEACON_RATE_HT = 0x7 NL80211_EXT_FEATURE_BEACON_RATE_LEGACY = 0x6 NL80211_EXT_FEATURE_BEACON_RATE_VHT = 0x8 + NL80211_EXT_FEATURE_BSS_COLOR = 0x3a NL80211_EXT_FEATURE_BSS_PARENT_TSF = 0x4 NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 = 0x1f NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH = 0x2a @@ -4818,6 +4926,7 @@ const ( NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 + NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD = 0x3b NL80211_EXT_FEATURE_FILS_DISCOVERY = 0x34 NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME = 0x11 NL80211_EXT_FEATURE_FILS_SK_OFFLOAD = 0xe @@ -4833,8 +4942,10 @@ const ( NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c NL80211_EXT_FEATURE_RRM = 0x1 
NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 NL80211_EXT_FEATURE_SAE_OFFLOAD = 0x26 @@ -4906,7 +5017,9 @@ const ( NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 + NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb + NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b NL80211_FREQUENCY_ATTR_NO_HE = 0x13 NL80211_FREQUENCY_ATTR_NO_HT40_MINUS = 0x9 NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa @@ -5006,6 +5119,12 @@ const ( NL80211_MAX_SUPP_HT_RATES = 0x4d NL80211_MAX_SUPP_RATES = 0x20 NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 + NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 + NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 + NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 NL80211_MESHCONF_ATTR_MAX = 0x1f NL80211_MESHCONF_AUTO_OPEN_PLINKS = 0x7 NL80211_MESHCONF_AWAKE_WINDOW = 0x1b @@ -5168,6 +5287,7 @@ const ( NL80211_PMSR_FTM_FAILURE_UNSPECIFIED = 0x0 NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL = 0x3 NL80211_PMSR_FTM_REQ_ATTR_ASAP = 0x1 + NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR = 0xd NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION = 0x5 NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD = 0x4 NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST = 0x6 @@ -5244,12 +5364,36 @@ const ( NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12 NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 NL80211_RATE_INFO_BITRATE32 = 0x5 NL80211_RATE_INFO_BITRATE = 0x1 + NL80211_RATE_INFO_EHT_GI_0_8 = 0x0 + NL80211_RATE_INFO_EHT_GI_1_6 = 0x1 + NL80211_RATE_INFO_EHT_GI_3_2 = 0x2 + NL80211_RATE_INFO_EHT_GI = 0x15 + NL80211_RATE_INFO_EHT_MCS = 0x13 + NL80211_RATE_INFO_EHT_NSS = 0x14 + NL80211_RATE_INFO_EHT_RU_ALLOC_106 = 0x3 + NL80211_RATE_INFO_EHT_RU_ALLOC_106P26 = 0x4 + NL80211_RATE_INFO_EHT_RU_ALLOC_242 = 0x5 + NL80211_RATE_INFO_EHT_RU_ALLOC_26 = 0x0 + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996 = 0xb + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484 = 0xc + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996 = 0xd + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484 = 0xe + NL80211_RATE_INFO_EHT_RU_ALLOC_484 = 0x6 + NL80211_RATE_INFO_EHT_RU_ALLOC_484P242 = 0x7 + NL80211_RATE_INFO_EHT_RU_ALLOC_4x996 = 0xf + NL80211_RATE_INFO_EHT_RU_ALLOC_52 = 0x1 + NL80211_RATE_INFO_EHT_RU_ALLOC_52P26 = 0x2 + NL80211_RATE_INFO_EHT_RU_ALLOC_996 = 0x8 + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484 = 0x9 + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242 = 0xa + NL80211_RATE_INFO_EHT_RU_ALLOC = 0x16 NL80211_RATE_INFO_HE_1XLTF = 0x0 NL80211_RATE_INFO_HE_2XLTF = 0x1 NL80211_RATE_INFO_HE_4XLTF = 0x2 @@ -5292,6 +5436,7 @@ const ( NL80211_RRF_GO_CONCURRENT = 0x1000 NL80211_RRF_IR_CONCURRENT = 0x1000 NL80211_RRF_NO_160MHZ = 0x10000 + NL80211_RRF_NO_320MHZ = 0x40000 NL80211_RRF_NO_80MHZ = 0x8000 NL80211_RRF_NO_CCK = 0x2 NL80211_RRF_NO_HE = 0x20000 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 2fd2060e6..9bc4c8f9d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -491,6 +491,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + 
Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 6a5a1a8ae..bb05f655d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -499,6 +499,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 84cc8d01e..db40e3a19 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -496,6 +496,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + 
Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index c844e7096..11121151c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -499,6 +499,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 2ed718ca0..26eba23b7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -58,22 +58,22 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Mode uint32 - Dev int32 - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev int32 - Atim Timespec - Mtim Timespec - 
Ctim Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - X__st_birthtim Timespec + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec } type Statfs_t struct { @@ -98,7 +98,7 @@ type Statfs_t struct { F_mntonname [90]byte F_mntfromname [90]byte F_mntfromspec [90]byte - Pad_cgo_0 [2]byte + _ [2]byte Mount_info [160]byte } @@ -111,13 +111,13 @@ type Flock_t struct { } type Dirent struct { - Fileno uint64 - Off int64 - Reclen uint16 - Type uint8 - Namlen uint8 - X__d_padding [4]uint8 - Name [256]int8 + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 } type Fsid struct { @@ -262,8 +262,8 @@ type FdSet struct { } const ( - SizeofIfMsghdr = 0xec - SizeofIfData = 0xd4 + SizeofIfMsghdr = 0xa0 + SizeofIfData = 0x88 SizeofIfaMsghdr = 0x18 SizeofIfAnnounceMsghdr = 0x1a SizeofRtMsghdr = 0x60 @@ -292,7 +292,7 @@ type IfData struct { Link_state uint8 Mtu uint32 Metric uint32 - Pad uint32 + Rdomain uint32 Baudrate uint64 Ipackets uint64 Ierrors uint64 @@ -304,10 +304,10 @@ type IfData struct { Imcasts uint64 Omcasts uint64 Iqdrops uint64 + Oqdrops uint64 Noproto uint64 Capabilities uint32 Lastchange Timeval - Mclpool [7]Mclpool } type IfaMsghdr struct { @@ -368,20 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct { - Grown int32 - Alive uint16 - Hwm uint16 - Cwm uint16 - Lwm uint16 -} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x8 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -407,11 +399,14 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp BpfTimeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { @@ -488,7 +483,7 @@ type Uvmexp struct { Zeropages int32 Reserve_pagedaemon int32 Reserve_kernel int32 - Anonpages int32 + Unused01 int32 Vnodepages int32 Vtextpages int32 Freemin int32 @@ -507,8 +502,8 @@ type Uvmexp struct { Swpgonly int32 Nswget int32 Nanon int32 - Nanonneeded int32 - Nfreeanon int32 + Unused05 int32 + Unused06 int32 Faults int32 Traps int32 Intrs int32 @@ -516,8 +511,8 @@ type Uvmexp struct { Softs int32 Syscalls int32 Pageins int32 - Obsolete_swapins int32 - Obsolete_swapouts int32 + Unused07 int32 + Unused08 int32 Pgswapin int32 Pgswapout int32 Forks int32 @@ -525,7 +520,7 @@ type Uvmexp struct { Forks_sharevm int32 Pga_zerohit int32 Pga_zeromiss int32 - Zeroaborts int32 + Unused09 int32 Fltnoram int32 Fltnoanon int32 Fltnoamap int32 @@ -557,9 +552,9 @@ type Uvmexp struct { Pdpageouts int32 Pdpending int32 Pddeact int32 - Pdreanon int32 - Pdrevnode int32 - Pdrevtext int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 Fpswtch int32 Kmapent int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index b4fb97ebe..5a5479886 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -73,7 +73,6 @@ type Stat_t struct { Blksize int32 Flags uint32 Gen uint32 - _ [4]byte _ Timespec } @@ -81,7 +80,6 @@ type Statfs_t struct { F_flags uint32 F_bsize uint32 F_iosize uint32 - _ [4]byte F_blocks uint64 F_bfree uint64 F_bavail int64 
@@ -200,10 +198,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen uint32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -311,7 +307,6 @@ type IfData struct { Oqdrops uint64 Noproto uint64 Capabilities uint32 - _ [4]byte Lastchange Timeval } @@ -373,14 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x10 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -395,7 +388,6 @@ type BpfStat struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } @@ -411,7 +403,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { @@ -488,7 +483,7 @@ type Uvmexp struct { Zeropages int32 Reserve_pagedaemon int32 Reserve_kernel int32 - Anonpages int32 + Unused01 int32 Vnodepages int32 Vtextpages int32 Freemin int32 @@ -507,8 +502,8 @@ type Uvmexp struct { Swpgonly int32 Nswget int32 Nanon int32 - Nanonneeded int32 - Nfreeanon int32 + Unused05 int32 + Unused06 int32 Faults int32 Traps int32 Intrs int32 @@ -516,8 +511,8 @@ type Uvmexp struct { Softs int32 Syscalls int32 Pageins int32 - Obsolete_swapins int32 - Obsolete_swapouts int32 + Unused07 int32 + Unused08 int32 Pgswapin int32 Pgswapout int32 Forks int32 @@ -525,7 +520,7 @@ type Uvmexp struct { Forks_sharevm int32 Pga_zerohit int32 Pga_zeromiss int32 - Zeroaborts int32 + Unused09 int32 Fltnoram int32 Fltnoanon int32 Fltnoamap int32 @@ -557,9 +552,9 @@ type Uvmexp struct { Pdpageouts int32 Pdpending int32 Pddeact int32 - Pdreanon int32 - Pdrevnode int32 - Pdrevtext int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 Fpswtch int32 Kmapent int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 2c4675040..be58c4e1f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -375,14 +375,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x8 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -412,7 +410,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index ddee04514..52338266c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -368,14 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x10 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -405,7 +403,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index eb13d4e8b..605cfdb12 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -368,14 +368,12 @@ type RtMetrics 
struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x10 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -405,7 +403,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 7a6ba43a7..41cb3c01f 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -10,7 +10,6 @@ import ( errorspkg "errors" "fmt" "runtime" - "strings" "sync" "syscall" "time" @@ -87,22 +86,13 @@ func StringToUTF16(s string) []uint16 { // s, with a terminating NUL added. If s contains a NUL byte at any // location, it returns (nil, syscall.EINVAL). func UTF16FromString(s string) ([]uint16, error) { - if strings.IndexByte(s, 0) != -1 { - return nil, syscall.EINVAL - } - return utf16.Encode([]rune(s + "\x00")), nil + return syscall.UTF16FromString(s) } // UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, // with a terminating NUL and any bytes after the NUL removed. func UTF16ToString(s []uint16) string { - for i, v := range s { - if v == 0 { - s = s[:i] - break - } - } - return string(utf16.Decode(s)) + return syscall.UTF16ToString(s) } // StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead. @@ -367,6 +357,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode //sys IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible //sys GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo +//sys GetLargePageMinimum() (size uintptr) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 96ba8559c..ac60052e4 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -252,6 +252,7 @@ var ( procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLargePageMinimum = modkernel32.NewProc("GetLargePageMinimum") procGetLastError = modkernel32.NewProc("GetLastError") procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") @@ -2180,6 +2181,12 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( return } +func GetLargePageMinimum() (size uintptr) { + r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + size = uintptr(r0) + return +} + func GetLastError() (lasterr error) { r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) if r0 != 0 { diff --git a/vendor/golang.org/x/term/AUTHORS b/vendor/golang.org/x/term/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/term/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. 
diff --git a/vendor/golang.org/x/term/CONTRIBUTORS b/vendor/golang.org/x/term/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/term/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/term/term.go b/vendor/golang.org/x/term/term.go index d59270880..1a40d1012 100644 --- a/vendor/golang.org/x/term/term.go +++ b/vendor/golang.org/x/term/term.go @@ -7,11 +7,11 @@ // // Putting a terminal into raw mode is the most common requirement: // -// oldState, err := term.MakeRaw(int(os.Stdin.Fd())) -// if err != nil { -// panic(err) -// } -// defer term.Restore(int(os.Stdin.Fd()), oldState) +// oldState, err := term.MakeRaw(int(os.Stdin.Fd())) +// if err != nil { +// panic(err) +// } +// defer term.Restore(int(os.Stdin.Fd()), oldState) // // Note that on non-Unix systems os.Stdin.Fd() may not be 0. package term diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go index 535ab8257..f636667fb 100644 --- a/vendor/golang.org/x/term/terminal.go +++ b/vendor/golang.org/x/term/terminal.go @@ -233,7 +233,6 @@ func (t *Terminal) queue(data []rune) { t.outBuf = append(t.outBuf, []byte(string(data))...) } -var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} var space = []rune{' '} func isPrintable(key rune) bool { @@ -935,7 +934,7 @@ func (s *stRingBuffer) Add(a string) { // next most recent, and so on. If such an element doesn't exist then ok is // false. func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { - if n >= s.size { + if n < 0 || n >= s.size { return "", false } index := s.head - n diff --git a/vendor/golang.org/x/text/AUTHORS b/vendor/golang.org/x/text/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/text/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/text/CONTRIBUTORS b/vendor/golang.org/x/text/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/text/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index e4c081101..9d2ae547b 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -193,14 +193,14 @@ func (p *paragraph) run() { // // At the end of this function: // -// - The member variable matchingPDI is set to point to the index of the -// matching PDI character for each isolate initiator character. If there is -// no matching PDI, it is set to the length of the input text. For other -// characters, it is set to -1. -// - The member variable matchingIsolateInitiator is set to point to the -// index of the matching isolate initiator character for each PDI character. -// If there is no matching isolate initiator, or the character is not a PDI, -// it is set to -1. +// - The member variable matchingPDI is set to point to the index of the +// matching PDI character for each isolate initiator character. 
If there is +// no matching PDI, it is set to the length of the input text. For other +// characters, it is set to -1. +// - The member variable matchingIsolateInitiator is set to point to the +// index of the matching isolate initiator character for each PDI character. +// If there is no matching isolate initiator, or the character is not a PDI, +// it is set to -1. func (p *paragraph) determineMatchingIsolates() { p.matchingPDI = make([]int, p.Len()) p.matchingIsolateInitiator = make([]int, p.Len()) @@ -435,7 +435,7 @@ func maxLevel(a, b level) level { } // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, -// either L or R, for each isolating run sequence. +// either L or R, for each isolating run sequence. func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { length := len(indexes) types := make([]Class, length) @@ -495,9 +495,9 @@ func (s *isolatingRunSequence) resolveWeakTypes() { if t == NSM { s.types[i] = precedingCharacterType } else { - if t.in(LRI, RLI, FSI, PDI) { - precedingCharacterType = ON - } + // if t.in(LRI, RLI, FSI, PDI) { + // precedingCharacterType = ON + // } precedingCharacterType = t } } @@ -905,7 +905,7 @@ func (p *paragraph) getLevels(linebreaks []int) []level { // Lines are concatenated from left to right. So for example, the fifth // character from the left on the third line is // -// getReordering(linebreaks)[linebreaks[1] + 4] +// getReordering(linebreaks)[linebreaks[1] + 4] // // (linebreaks[1] is the position after the last character of the second // line, which is also the index of the first character on the third line, diff --git a/vendor/golang.org/x/text/unicode/bidi/trieval.go b/vendor/golang.org/x/text/unicode/bidi/trieval.go index 4c459c4b7..6a796e221 100644 --- a/vendor/golang.org/x/text/unicode/bidi/trieval.go +++ b/vendor/golang.org/x/text/unicode/bidi/trieval.go @@ -37,18 +37,6 @@ const ( unknownClass = ^Class(0) ) -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - // A trie entry has the following bits: // 7..5 XOR mask for brackets // 4 1: Bracket open, 0: Bracket close diff --git a/vendor/golang.org/x/text/unicode/norm/forminfo.go b/vendor/golang.org/x/text/unicode/norm/forminfo.go index 526c7033a..d69ccb4f9 100644 --- a/vendor/golang.org/x/text/unicode/norm/forminfo.go +++ b/vendor/golang.org/x/text/unicode/norm/forminfo.go @@ -110,10 +110,11 @@ func (p Properties) BoundaryAfter() bool { } // We pack quick check data in 4 bits: -// 5: Combines forward (0 == false, 1 == true) -// 4..3: NFC_QC Yes(00), No (10), or Maybe (11) -// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition. -// 1..0: Number of trailing non-starters. +// +// 5: Combines forward (0 == false, 1 == true) +// 4..3: NFC_QC Yes(00), No (10), or Maybe (11) +// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition. +// 1..0: Number of trailing non-starters. // // When all 4 bits are zero, the character is inert, meaning it is never // influenced by normalization. 
diff --git a/vendor/golang.org/x/text/unicode/norm/normalize.go b/vendor/golang.org/x/text/unicode/norm/normalize.go index 95efcf26e..4747ad07a 100644 --- a/vendor/golang.org/x/text/unicode/norm/normalize.go +++ b/vendor/golang.org/x/text/unicode/norm/normalize.go @@ -18,16 +18,17 @@ import ( // A Form denotes a canonical representation of Unicode code points. // The Unicode-defined normalization and equivalence forms are: // -// NFC Unicode Normalization Form C -// NFD Unicode Normalization Form D -// NFKC Unicode Normalization Form KC -// NFKD Unicode Normalization Form KD +// NFC Unicode Normalization Form C +// NFD Unicode Normalization Form D +// NFKC Unicode Normalization Form KC +// NFKD Unicode Normalization Form KD // // For a Form f, this documentation uses the notation f(x) to mean // the bytes or string x converted to the given form. // A position n in x is called a boundary if conversion to the form can // proceed independently on both sides: -// f(x) == append(f(x[0:n]), f(x[n:])...) +// +// f(x) == append(f(x[0:n]), f(x[n:])...) // // References: https://unicode.org/reports/tr15/ and // https://unicode.org/notes/tn5/. diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index 96a130d30..9115ef257 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -7315,7 +7315,7 @@ const recompMapPacked = "" + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E - "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00v\x03#\x00\x00\x1e\x7f" + // 0x00760323: 0x00001E7F "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 @@ -7342,7 +7342,7 @@ const recompMapPacked = "" + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 - "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x01\x7f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 diff --git a/vendor/modules.txt b/vendor/modules.txt index 3ff799bac..89a7a99ca 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -253,7 +253,7 @@ golang.org/x/exp/constraints # golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 ## explicit; go 1.17 golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20221014081412-f15817d10f9b +# golang.org/x/net v0.7.0 ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/http/httpguts @@ -271,7 +271,7 @@ golang.org/x/net/trace # golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 ## explicit golang.org/x/sync/errgroup -# golang.org/x/sys v0.2.0 +# golang.org/x/sys v0.5.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -279,10 +279,10 @@ golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 +# golang.org/x/term v0.5.0 ## explicit; go 1.17 golang.org/x/term -# golang.org/x/text v0.3.7 +# golang.org/x/text v0.7.0 ## explicit; go 1.17 golang.org/x/text/secure/bidirule 
golang.org/x/text/transform From b5db6ee0b6630e7ec52caf5db80f14cbd7629fc4 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Wed, 15 Feb 2023 18:25:16 -0500 Subject: [PATCH 19/44] Update vendored Psiphon-Labs/quic-go - Update to upstream quic-go v0.32 --- go.mod | 7 +- go.sum | 21 +- .../Psiphon-Labs/quic-go/Changelog.md | 4 +- .../github.com/Psiphon-Labs/quic-go/README.md | 29 +- .../github.com/Psiphon-Labs/quic-go/config.go | 1 + .../Psiphon-Labs/quic-go/conn_id_generator.go | 4 - .../Psiphon-Labs/quic-go/connection.go | 237 ++-- .../github.com/Psiphon-Labs/quic-go/errors.go | 7 +- .../Psiphon-Labs/quic-go/frame_sorter.go | 9 +- .../github.com/Psiphon-Labs/quic-go/framer.go | 31 +- .../Psiphon-Labs/quic-go/http3/client.go | 21 +- .../Psiphon-Labs/quic-go/http3/request.go | 4 +- .../quic-go/http3/request_writer.go | 7 +- .../quic-go/http3/response_writer.go | 9 +- .../Psiphon-Labs/quic-go/http3/roundtrip.go | 71 +- .../Psiphon-Labs/quic-go/http3/server.go | 35 +- .../Psiphon-Labs/quic-go/interface.go | 6 +- .../internal/ackhandler/ack_eliciting.go | 2 +- .../quic-go/internal/ackhandler/ackhandler.go | 3 +- .../quic-go/internal/ackhandler/frame.go | 22 +- .../quic-go/internal/ackhandler/packet.go | 10 +- .../ackhandler/received_packet_handler.go | 7 +- .../ackhandler/received_packet_history.go | 10 +- .../ackhandler/received_packet_tracker.go | 4 - .../ackhandler/sent_packet_history.go | 19 +- .../internal/handshake/crypto_setup.go | 47 +- .../internal/handshake/initial_aead.go | 2 +- .../quic-go/internal/handshake/retry.go | 36 +- .../quic-go/internal/protocol/version.go | 2 +- .../quic-go/internal/qtls/go121.go | 5 + .../quic-go/internal/qtls/go_oldversion.go | 2 +- .../internal/utils/linkedlist/README.md | 4 +- .../internal/utils/linkedlist/linkedlist.go | 33 +- .../quic-go/internal/utils/log.go | 2 +- .../quic-go/internal/wire/extended_header.go | 153 +- .../quic-go/internal/wire/frame_parser.go | 57 +- .../quic-go/internal/wire/header.go | 66 +- .../quic-go/internal/wire/interface.go | 2 +- .../quic-go/internal/wire/short_header.go | 18 + .../internal/wire/transport_parameters.go | 3 + .../Psiphon-Labs/quic-go/logging/interface.go | 6 +- .../Psiphon-Labs/quic-go/logging/multiplex.go | 10 +- .../quic-go/logging/null_tracer.go | 11 +- .../quic-go/logging/packet_header.go | 3 - .../quic-go/packet_handler_map.go | 4 +- .../Psiphon-Labs/quic-go/packet_packer.go | 694 ++++++---- .../Psiphon-Labs/quic-go/packet_unpacker.go | 24 +- .../Psiphon-Labs/quic-go/quicvarint/varint.go | 18 +- .../Psiphon-Labs/quic-go/receive_stream.go | 8 +- .../quic-go/retransmission_queue.go | 22 +- .../Psiphon-Labs/quic-go/send_stream.go | 47 +- .../github.com/Psiphon-Labs/quic-go/server.go | 70 +- .../github.com/Psiphon-Labs/quic-go/stream.go | 11 +- .../Psiphon-Labs/quic-go/streams_map.go | 11 +- .../Psiphon-Labs/quic-go/sys_conn_df_linux.go | 3 +- .../quic-go/sys_conn_df_windows.go | 3 +- .../Psiphon-Labs/quic-go/token_store.go | 16 +- .../go-task/slim-sprig/.editorconfig | 14 + .../go-task/slim-sprig/.gitattributes | 1 + .../github.com/go-task/slim-sprig/.gitignore | 2 + .../go-task/slim-sprig/CHANGELOG.md | 364 +++++ .../github.com/go-task/slim-sprig/LICENSE.txt | 19 + .../github.com/go-task/slim-sprig/README.md | 73 + .../go-task/slim-sprig/Taskfile.yml | 12 + .../github.com/go-task/slim-sprig/crypto.go | 24 + vendor/github.com/go-task/slim-sprig/date.go | 152 ++ .../github.com/go-task/slim-sprig/defaults.go | 163 +++ vendor/github.com/go-task/slim-sprig/dict.go | 118 ++ 
vendor/github.com/go-task/slim-sprig/doc.go | 19 + .../go-task/slim-sprig/functions.go | 317 +++++ vendor/github.com/go-task/slim-sprig/list.go | 464 +++++++ .../github.com/go-task/slim-sprig/network.go | 12 + .../github.com/go-task/slim-sprig/numeric.go | 228 +++ .../github.com/go-task/slim-sprig/reflect.go | 28 + vendor/github.com/go-task/slim-sprig/regex.go | 83 ++ .../github.com/go-task/slim-sprig/strings.go | 189 +++ vendor/github.com/go-task/slim-sprig/url.go | 66 + vendor/github.com/google/pprof/AUTHORS | 7 + vendor/github.com/google/pprof/CONTRIBUTORS | 16 + vendor/github.com/google/pprof/LICENSE | 202 +++ .../github.com/google/pprof/profile/encode.go | 567 ++++++++ .../github.com/google/pprof/profile/filter.go | 270 ++++ .../github.com/google/pprof/profile/index.go | 64 + .../pprof/profile/legacy_java_profile.go | 315 +++++ .../google/pprof/profile/legacy_profile.go | 1225 +++++++++++++++++ .../github.com/google/pprof/profile/merge.go | 481 +++++++ .../google/pprof/profile/profile.go | 805 +++++++++++ .../github.com/google/pprof/profile/proto.go | 370 +++++ .../github.com/google/pprof/profile/prune.go | 178 +++ vendor/github.com/onsi/ginkgo/v2/LICENSE | 20 + .../onsi/ginkgo/v2/config/deprecated.go | 69 + .../ginkgo/v2/formatter/colorable_others.go | 41 + .../ginkgo/v2/formatter/colorable_windows.go | 809 +++++++++++ .../onsi/ginkgo/v2/formatter/formatter.go | 195 +++ .../onsi/ginkgo/v2/ginkgo/command/abort.go | 61 + .../onsi/ginkgo/v2/ginkgo/command/command.go | 50 + .../onsi/ginkgo/v2/ginkgo/command/program.go | 182 +++ .../ginkgo/generators/boostrap_templates.go | 48 + .../v2/ginkgo/generators/bootstrap_command.go | 113 ++ .../v2/ginkgo/generators/generate_command.go | 239 ++++ .../ginkgo/generators/generate_templates.go | 41 + .../v2/ginkgo/generators/generators_common.go | 63 + .../onsi/ginkgo/v2/ginkgo/internal/compile.go | 152 ++ .../ginkgo/internal/profiles_and_reports.go | 237 ++++ .../onsi/ginkgo/v2/ginkgo/internal/run.go | 348 +++++ .../ginkgo/v2/ginkgo/internal/test_suite.go | 283 ++++ .../onsi/ginkgo/v2/ginkgo/internal/utils.go | 86 ++ .../ginkgo/v2/ginkgo/labels/labels_command.go | 123 ++ .../github.com/onsi/ginkgo/v2/ginkgo/main.go | 58 + .../onsi/ginkgo/v2/ginkgo/outline/ginkgo.go | 218 +++ .../onsi/ginkgo/v2/ginkgo/outline/import.go | 65 + .../onsi/ginkgo/v2/ginkgo/outline/outline.go | 103 ++ .../v2/ginkgo/outline/outline_command.go | 98 ++ .../onsi/ginkgo/v2/ginkgo/run/run_command.go | 230 ++++ .../v2/ginkgo/unfocus/unfocus_command.go | 186 +++ .../onsi/ginkgo/v2/ginkgo/watch/delta.go | 22 + .../ginkgo/v2/ginkgo/watch/delta_tracker.go | 75 + .../ginkgo/v2/ginkgo/watch/dependencies.go | 92 ++ .../ginkgo/v2/ginkgo/watch/package_hash.go | 108 ++ .../ginkgo/v2/ginkgo/watch/package_hashes.go | 85 ++ .../onsi/ginkgo/v2/ginkgo/watch/suite.go | 87 ++ .../ginkgo/v2/ginkgo/watch/watch_command.go | 190 +++ .../interrupt_handler/interrupt_handler.go | 196 +++ .../sigquit_swallower_unix.go | 15 + .../sigquit_swallower_windows.go | 8 + .../parallel_support/client_server.go | 70 + .../internal/parallel_support/http_client.go | 156 +++ .../internal/parallel_support/http_server.go | 223 +++ .../internal/parallel_support/rpc_client.go | 123 ++ .../internal/parallel_support/rpc_server.go | 75 + .../parallel_support/server_handler.go | 209 +++ .../ginkgo/v2/reporters/default_reporter.go | 555 ++++++++ .../v2/reporters/deprecated_reporter.go | 149 ++ .../onsi/ginkgo/v2/reporters/json_report.go | 60 + .../onsi/ginkgo/v2/reporters/junit_report.go | 338 +++++ 
.../onsi/ginkgo/v2/reporters/reporter.go | 21 + .../ginkgo/v2/reporters/teamcity_report.go | 97 ++ .../onsi/ginkgo/v2/types/code_location.go | 92 ++ .../github.com/onsi/ginkgo/v2/types/config.go | 732 ++++++++++ .../onsi/ginkgo/v2/types/deprecated_types.go | 141 ++ .../ginkgo/v2/types/deprecation_support.go | 170 +++ .../onsi/ginkgo/v2/types/enum_support.go | 43 + .../github.com/onsi/ginkgo/v2/types/errors.go | 543 ++++++++ .../onsi/ginkgo/v2/types/file_filter.go | 106 ++ .../github.com/onsi/ginkgo/v2/types/flags.go | 489 +++++++ .../onsi/ginkgo/v2/types/label_filter.go | 347 +++++ .../onsi/ginkgo/v2/types/report_entry.go | 186 +++ .../github.com/onsi/ginkgo/v2/types/types.go | 652 +++++++++ .../onsi/ginkgo/v2/types/version.go | 3 + .../qpack/.codecov.yml | 0 .../qpack/.gitignore | 0 .../qpack/.gitmodules | 0 .../qpack/.golangci.yml | 0 .../qpack/LICENSE.md | 0 .../qpack/README.md | 0 .../qpack/decoder.go | 0 .../qpack/encoder.go | 0 .../qpack/header_field.go | 0 .../qpack/static_table.go | 0 vendor/github.com/quic-go/qpack/tools.go | 5 + .../qpack/varint.go | 0 vendor/modules.txt | 30 +- 162 files changed, 18592 insertions(+), 840 deletions(-) create mode 100644 vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go121.go create mode 100644 vendor/github.com/go-task/slim-sprig/.editorconfig create mode 100644 vendor/github.com/go-task/slim-sprig/.gitattributes create mode 100644 vendor/github.com/go-task/slim-sprig/.gitignore create mode 100644 vendor/github.com/go-task/slim-sprig/CHANGELOG.md create mode 100644 vendor/github.com/go-task/slim-sprig/LICENSE.txt create mode 100644 vendor/github.com/go-task/slim-sprig/README.md create mode 100644 vendor/github.com/go-task/slim-sprig/Taskfile.yml create mode 100644 vendor/github.com/go-task/slim-sprig/crypto.go create mode 100644 vendor/github.com/go-task/slim-sprig/date.go create mode 100644 vendor/github.com/go-task/slim-sprig/defaults.go create mode 100644 vendor/github.com/go-task/slim-sprig/dict.go create mode 100644 vendor/github.com/go-task/slim-sprig/doc.go create mode 100644 vendor/github.com/go-task/slim-sprig/functions.go create mode 100644 vendor/github.com/go-task/slim-sprig/list.go create mode 100644 vendor/github.com/go-task/slim-sprig/network.go create mode 100644 vendor/github.com/go-task/slim-sprig/numeric.go create mode 100644 vendor/github.com/go-task/slim-sprig/reflect.go create mode 100644 vendor/github.com/go-task/slim-sprig/regex.go create mode 100644 vendor/github.com/go-task/slim-sprig/strings.go create mode 100644 vendor/github.com/go-task/slim-sprig/url.go create mode 100644 vendor/github.com/google/pprof/AUTHORS create mode 100644 vendor/github.com/google/pprof/CONTRIBUTORS create mode 100644 vendor/github.com/google/pprof/LICENSE create mode 100644 vendor/github.com/google/pprof/profile/encode.go create mode 100644 vendor/github.com/google/pprof/profile/filter.go create mode 100644 vendor/github.com/google/pprof/profile/index.go create mode 100644 vendor/github.com/google/pprof/profile/legacy_java_profile.go create mode 100644 vendor/github.com/google/pprof/profile/legacy_profile.go create mode 100644 vendor/github.com/google/pprof/profile/merge.go create mode 100644 vendor/github.com/google/pprof/profile/profile.go create mode 100644 vendor/github.com/google/pprof/profile/proto.go create mode 100644 vendor/github.com/google/pprof/profile/prune.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/LICENSE create mode 100644 vendor/github.com/onsi/ginkgo/v2/config/deprecated.go create mode 100644 
vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go create 
mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/code_location.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/config.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/enum_support.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/errors.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/file_filter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/flags.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/label_filter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/report_entry.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/types.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/version.go rename vendor/github.com/{marten-seemann => quic-go}/qpack/.codecov.yml (100%) rename vendor/github.com/{marten-seemann => quic-go}/qpack/.gitignore (100%) rename vendor/github.com/{marten-seemann => quic-go}/qpack/.gitmodules (100%) rename vendor/github.com/{marten-seemann => quic-go}/qpack/.golangci.yml (100%) rename vendor/github.com/{marten-seemann => quic-go}/qpack/LICENSE.md (100%) rename vendor/github.com/{marten-seemann => quic-go}/qpack/README.md (100%) rename vendor/github.com/{marten-seemann => quic-go}/qpack/decoder.go (100%) rename vendor/github.com/{marten-seemann => quic-go}/qpack/encoder.go (100%) rename vendor/github.com/{marten-seemann => quic-go}/qpack/header_field.go (100%) rename vendor/github.com/{marten-seemann => quic-go}/qpack/static_table.go (100%) create mode 100644 vendor/github.com/quic-go/qpack/tools.go rename vendor/github.com/{marten-seemann => quic-go}/qpack/varint.go (100%) diff --git a/go.mod b/go.mod index b48d29510..b2c225f13 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/Psiphon-Inc/rotate-safe-writer v0.0.0-20210303140923-464a7a37606e github.com/Psiphon-Labs/bolt v0.0.0-20200624191537-23cedaef7ad7 github.com/Psiphon-Labs/goptlib v0.0.0-20200406165125-c0e32a7a3464 - github.com/Psiphon-Labs/quic-go v0.0.0-20230124165616-fe8e9a215a66 + github.com/Psiphon-Labs/quic-go v0.0.0-20230215230806-9b1ddbf778cc github.com/Psiphon-Labs/tls-tris v0.0.0-20210713133851-676a693d51ad github.com/armon/go-proxyproto v0.0.0-20180202201750-5b7edb60ff5f github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61 @@ -66,19 +66,22 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/dchest/siphash v1.2.3-0.20201109081723-a21c2e7914a8 // indirect github.com/dgryski/go-farm v0.0.0-20180109070241-2de33835d102 // indirect + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3-0.20210916003710-5d5e8c018a13 // indirect github.com/google/go-cmp v0.5.8 // indirect github.com/google/gxui v0.0.0-20151028112939-f85e0a97b3a4 // indirect + github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect github.com/josharian/native v1.0.0 // indirect github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect github.com/klauspost/compress v1.15.10-0.20220729101446-5a3a4a965cc6 // 
indirect - github.com/marten-seemann/qpack v0.3.0 // indirect github.com/mdlayher/netlink v1.4.2-0.20210930205308-a81a8c23d40a // indirect github.com/mdlayher/socket v0.0.0-20210624160740-9dbe287ded84 // indirect github.com/mroth/weightedrand v0.4.1 // indirect + github.com/onsi/ginkgo/v2 v2.2.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/quic-go/qpack v0.4.0 // indirect github.com/sergeyfrolov/bsbuffer v0.0.0-20180903213811-94e85abb8507 // indirect github.com/smartystreets/goconvey v1.7.2 // indirect gitlab.com/yawning/obfs4.git v0.0.0-20190120164510-816cff15f425 // indirect diff --git a/go.sum b/go.sum index 5b3469354..8e36d7570 100644 --- a/go.sum +++ b/go.sum @@ -24,6 +24,8 @@ github.com/Psiphon-Labs/quic-go v0.0.0-20221014165902-1b7c3975fcf3 h1:BKSZdSkhOG github.com/Psiphon-Labs/quic-go v0.0.0-20221014165902-1b7c3975fcf3/go.mod h1:llhtSl7dUXTssUN4m4MjUDJrULGNxgZBMKYjExuk6EM= github.com/Psiphon-Labs/quic-go v0.0.0-20230124165616-fe8e9a215a66 h1:xTGnmXqEEUphnohYLGwGrlVzWPNpTNAFViJYxQUMHRU= github.com/Psiphon-Labs/quic-go v0.0.0-20230124165616-fe8e9a215a66/go.mod h1:cu4yhfHkyt+uQ9FFFjTpjCjcQYf52ntEAyoV4Zg0+fg= +github.com/Psiphon-Labs/quic-go v0.0.0-20230215230806-9b1ddbf778cc h1:FUmGSvMiMbf1tFXWbK0+N7+5zBhOol8CHQdpB4ZQlDg= +github.com/Psiphon-Labs/quic-go v0.0.0-20230215230806-9b1ddbf778cc/go.mod h1:cu4yhfHkyt+uQ9FFFjTpjCjcQYf52ntEAyoV4Zg0+fg= github.com/Psiphon-Labs/tls-tris v0.0.0-20210713133851-676a693d51ad h1:m6HS84+b5xDPLj7D/ya1CeixyaHOCZoMbBilJ48y+Ts= github.com/Psiphon-Labs/tls-tris v0.0.0-20210713133851-676a693d51ad/go.mod h1:v3y9GXFo9Sf2mO6auD2ExGG7oDgrK8TI7eb49ZnUxrE= github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 h1:w1UutsfOrms1J05zt7ISrnJIXKzwaspym5BTKGx93EI= @@ -37,6 +39,9 @@ github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61 h1:BU+NxuoaYPIvvp github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9 h1:a1zrFsLFac2xoM6zG1u72DWJwZG3ayttYLfmLbxVETk= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cognusion/go-cache-lru v0.0.0-20170419142635-f73e2280ecea h1:9C2rdYRp8Vzwhm3sbFX0yYfB+70zKFRjn7cnPCucHSw= @@ -63,6 +68,8 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gobwas/glob v0.2.4-0.20180402141543-f00a7392b439 h1:T6zlOdzrYuHf6HUKujm9bzkzbZ5Iv/xf6rs8BHZDpoI= 
github.com/gobwas/glob v0.2.4-0.20180402141543-f00a7392b439/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -94,6 +101,8 @@ github.com/google/gopacket v1.1.19-0.20200831200443-df1bbd09a561 h1:VB5cLlMqQWru github.com/google/gopacket v1.1.19-0.20200831200443-df1bbd09a561/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/gxui v0.0.0-20151028112939-f85e0a97b3a4 h1:OL2d27ueTKnlQJoqLW2fc9pWYulFnJYLWzomGV7HqZo= github.com/google/gxui v0.0.0-20151028112939-f85e0a97b3a4/go.mod h1:Pw1H1OjSNHiqeuxAduB1BKYXIwFtsyrY47nEqSgEiCM= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/grafov/m3u8 v0.0.0-20171211212457-6ab8f28ed427 h1:xh96CCAZTX8LJPFoOVRgTwZbn2DvJl8fyCyivohhSIg= @@ -102,6 +111,7 @@ github.com/h2non/gock v1.0.9/go.mod h1:CZMcB0Lg5IWnr9bF79pPMg9WeV6WumxQiUJ1UvdO1 github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw= github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk= @@ -131,10 +141,6 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/marten-seemann/qpack v0.2.1 h1:jvTsT/HpCn2UZJdP+UUB53FfUUgeOyG5K1ns0OJOGVs= -github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= -github.com/marten-seemann/qpack v0.3.0 h1:UiWstOgT8+znlkDPOg2+3rIuYXJ2CnGDkGUXN6ki6hE= -github.com/marten-seemann/qpack v0.3.0/go.mod h1:cGfKPBiP4a9EQdxCwEwI/GEeWAsjSekBvx/X8mh58+g= github.com/marusama/semaphore v0.0.0-20171214154724-565ffd8e868a h1:6SRny9FLB1eWasPyDUqBQnMi9NhXU01XIlB0ao89YoI= github.com/marusama/semaphore v0.0.0-20171214154724-565ffd8e868a/go.mod h1:TmeOqAKoDinfPfSohs14CO3VcEf7o+Bem6JiNe05yrQ= github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43 h1:WgyLFv10Ov49JAQI/ZLUkCZ7VJS3r74hwFIGXJsgZlY= @@ -171,6 +177,8 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo/v2 v2.2.0 
h1:3ZNA3L1c5FYDFTTxbFeVGGD8jYvjYauHD30YgLxVsNI= +github.com/onsi/ginkgo/v2 v2.2.0/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -186,6 +194,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= +github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/refraction-networking/utls v1.0.0/go.mod h1:tz9gX959MEFfFN5whTIocCLUG57WiILqtdVxI8c6Wj0= github.com/refraction-networking/utls v1.1.3 h1:K9opY+iKxcGvHOBG2019wFEVtsNFh0f5WqHyc2i3iU0= github.com/refraction-networking/utls v1.1.3/go.mod h1:+D89TUtA8+NKVFj1IXWr0p3tSdX1+SqUB7rL0QnGqyg= @@ -202,6 +212,7 @@ github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hg github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 h1:zLV6q4e8Jv9EHjNg/iHfzwDkCve6Ua5jCygptrtXHvI= @@ -276,6 +287,7 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -348,6 +360,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git 
a/vendor/github.com/Psiphon-Labs/quic-go/Changelog.md b/vendor/github.com/Psiphon-Labs/quic-go/Changelog.md index c1c332327..82df5fb24 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/Changelog.md +++ b/vendor/github.com/Psiphon-Labs/quic-go/Changelog.md @@ -101,8 +101,8 @@ - Add a `quic.Config` option to configure keep-alive - Rename the STK to Cookie - Implement `net.Conn`-style deadlines for streams -- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/lucas-clemente/quic-go) for details. -- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/lucas-clemente/quic-go/wiki/Logging) for more details. +- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/quic-go/quic-go) for details. +- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/quic-go/quic-go/wiki/Logging) for more details. - Rename the `h2quic.QuicRoundTripper` to `h2quic.RoundTripper` - Changed `h2quic.Server.Serve()` to accept a `net.PacketConn` - Drop support for Go 1.7 and 1.8. diff --git a/vendor/github.com/Psiphon-Labs/quic-go/README.md b/vendor/github.com/Psiphon-Labs/quic-go/README.md index 5efb4f436..b41a2de4c 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/README.md +++ b/vendor/github.com/Psiphon-Labs/quic-go/README.md @@ -2,8 +2,8 @@ -[![PkgGoDev](https://pkg.go.dev/badge/github.com/lucas-clemente/quic-go)](https://pkg.go.dev/github.com/lucas-clemente/quic-go) -[![Code Coverage](https://img.shields.io/codecov/c/github/lucas-clemente/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/lucas-clemente/quic-go/) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/quic-go/quic-go)](https://pkg.go.dev/github.com/quic-go/quic-go) +[![Code Coverage](https://img.shields.io/codecov/c/github/quic-go/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/quic-go/quic-go/) quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go, including the Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)) and Datagram Packetization Layer Path MTU Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899)). It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)). 
@@ -45,18 +45,19 @@ http.Client{ ## Projects using quic-go -| Project | Description | Stars | -|------------------------------------------------------|--------------------------------------------------------------------------------------------------------|-------| -| [algernon](https://github.com/xyproto/algernon) | Small self-contained pure-Go web server with Lua, Markdown, HTTP/2, QUIC, Redis and PostgreSQL support | ![GitHub Repo stars](https://img.shields.io/github/stars/xyproto/algernon?style=flat-square) | -| [caddy](https://github.com/caddyserver/caddy/) | Fast, multi-platform web server with automatic HTTPS | ![GitHub Repo stars](https://img.shields.io/github/stars/caddyserver/caddy?style=flat-square) | -| [go-ipfs](https://github.com/ipfs/go-ipfs) | IPFS implementation in go | ![GitHub Repo stars](https://img.shields.io/github/stars/ipfs/go-ipfs?style=flat-square) | -| [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) | -| [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) | -| [v2ray-core](https://github.com/v2fly/v2ray-core) | A platform for building proxies to bypass network restrictions | ![GitHub Repo stars](https://img.shields.io/github/stars/v2fly/v2ray-core?style=flat-square) | -| [cloudflared](https://github.com/cloudflare/cloudflared) | A tunneling daemon that proxies traffic from the Cloudflare network to your origins | ![GitHub Repo stars](https://img.shields.io/github/stars/cloudflare/cloudflared?style=flat-square) | -| [OONI Probe](https://github.com/ooni/probe-cli) | The Open Observatory of Network Interference (OONI) aims to empower decentralized efforts in documenting Internet censorship around the world. | ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) | -| [YoMo](https://github.com/yomorun/yomo) | Streaming Serverless Framework for Geo-distributed System | ![GitHub Repo stars](https://img.shields.io/github/stars/yomorun/yomo?style=flat-square) | +| Project | Description | Stars | +|-----------------------------------------------------------|---------------------------------------------------------------------------------------------------------|-------| +| [AdGuardHome](https://github.com/AdguardTeam/AdGuardHome) | Free and open source, powerful network-wide ads & trackers blocking DNS server. 
| ![GitHub Repo stars](https://img.shields.io/github/stars/AdguardTeam/AdGuardHome?style=flat-square) | +| [algernon](https://github.com/xyproto/algernon) | Small self-contained pure-Go web server with Lua, Markdown, HTTP/2, QUIC, Redis and PostgreSQL support | ![GitHub Repo stars](https://img.shields.io/github/stars/xyproto/algernon?style=flat-square) | +| [caddy](https://github.com/caddyserver/caddy/) | Fast, multi-platform web server with automatic HTTPS | ![GitHub Repo stars](https://img.shields.io/github/stars/caddyserver/caddy?style=flat-square) | +| [cloudflared](https://github.com/cloudflare/cloudflared) | A tunneling daemon that proxies traffic from the Cloudflare network to your origins | ![GitHub Repo stars](https://img.shields.io/github/stars/cloudflare/cloudflared?style=flat-square) | +| [go-libp2p](https://github.com/libp2p/go-libp2p) | libp2p implementation in Go, powering [Kubo](https://github.com/ipfs/kubo) (IPFS) and [Lotus](https://github.com/filecoin-project/lotus) (Filecoin), among others | ![GitHub Repo stars](https://img.shields.io/github/stars/libp2p/go-libp2p?style=flat-square) | +| [OONI Probe](https://github.com/ooni/probe-cli) | Next generation OONI Probe. Library and CLI tool. | ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) | +| [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) | +| [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) | +| [v2ray-core](https://github.com/v2fly/v2ray-core) | A platform for building proxies to bypass network restrictions | ![GitHub Repo stars](https://img.shields.io/github/stars/v2fly/v2ray-core?style=flat-square) | +| [YoMo](https://github.com/yomorun/yomo) | Streaming Serverless Framework for Geo-distributed System | ![GitHub Repo stars](https://img.shields.io/github/stars/yomorun/yomo?style=flat-square) | ## Contributing -We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/lucas-clemente/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment. +We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/quic-go/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment. 
diff --git a/vendor/github.com/Psiphon-Labs/quic-go/config.go b/vendor/github.com/Psiphon-Labs/quic-go/config.go index a664a421d..b830ad790 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/config.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/config.go @@ -135,6 +135,7 @@ func populateConfig(config *Config, defaultConnIDLen int) *Config { EnableDatagrams: config.EnableDatagrams, DisablePathMTUDiscovery: config.DisablePathMTUDiscovery, DisableVersionNegotiationPackets: config.DisableVersionNegotiationPackets, + Allow0RTT: config.Allow0RTT, Tracer: config.Tracer, // [Psiphon] diff --git a/vendor/github.com/Psiphon-Labs/quic-go/conn_id_generator.go b/vendor/github.com/Psiphon-Labs/quic-go/conn_id_generator.go index 587f54eb2..27f1bc12d 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/conn_id_generator.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/conn_id_generator.go @@ -22,8 +22,6 @@ type connIDGenerator struct { retireConnectionID func(protocol.ConnectionID) replaceWithClosed func([]protocol.ConnectionID, protocol.Perspective, []byte) queueControlFrame func(wire.Frame) - - version protocol.VersionNumber } func newConnIDGenerator( @@ -36,7 +34,6 @@ func newConnIDGenerator( replaceWithClosed func([]protocol.ConnectionID, protocol.Perspective, []byte), queueControlFrame func(wire.Frame), generator ConnectionIDGenerator, - version protocol.VersionNumber, ) *connIDGenerator { m := &connIDGenerator{ generator: generator, @@ -47,7 +44,6 @@ func newConnIDGenerator( retireConnectionID: retireConnectionID, replaceWithClosed: replaceWithClosed, queueControlFrame: queueControlFrame, - version: version, } m.activeSrcConnIDs[0] = initialConnectionID m.initialClientDestConnID = initialClientDestConnID diff --git a/vendor/github.com/Psiphon-Labs/quic-go/connection.go b/vendor/github.com/Psiphon-Labs/quic-go/connection.go index 5fd81867f..dda7babf1 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/connection.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/connection.go @@ -25,7 +25,7 @@ import ( ) type unpacker interface { - UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte) (*unpackedPacket, error) + UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.VersionNumber) (*unpackedPacket, error) UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) } @@ -218,6 +218,9 @@ type connection struct { datagramQueue *datagramQueue + connStateMutex sync.Mutex + connState ConnectionState + logID string tracer logging.ConnectionTracer logger utils.Logger @@ -241,7 +244,6 @@ var newConnection = func( conf *Config, tlsConf *tls.Config, tokenGenerator *handshake.TokenGenerator, - enable0RTT bool, clientAddressValidated bool, tracer logging.ConnectionTracer, tracingID uint64, @@ -282,7 +284,6 @@ var newConnection = func( runner.ReplaceWithClosed, s.queueControlFrame, s.config.ConnectionIDGenerator, - s.version, ) s.preSetup() s.ctx, s.ctxCancel = context.WithCancel(context.WithValue(context.Background(), ConnectionTracingKey, tracingID)) @@ -301,7 +302,6 @@ var newConnection = func( s.perspective, s.tracer, s.logger, - s.version, ) initialStream := newCryptoStream() handshakeStream := newCryptoStream() @@ -330,6 +330,10 @@ var newConnection = func( if s.tracer != nil { s.tracer.SentTransportParameters(params) } + var allow0RTT func() bool + if conf.Allow0RTT != nil { + allow0RTT = func() bool { return conf.Allow0RTT(conn.RemoteAddr()) } + } cs := handshake.NewCryptoSetupServer( initialStream, 
handshakeStream, @@ -347,7 +351,7 @@ var newConnection = func( }, }, tlsConf, - enable0RTT, + allow0RTT, s.rttStats, tracer, logger, @@ -371,9 +375,8 @@ var newConnection = func( s.receivedPacketHandler, s.datagramQueue, s.perspective, - s.version, ) - s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen, s.version) + s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen) s.cryptoStreamManager = newCryptoStreamManager(cs, initialStream, handshakeStream, s.oneRTTStream) return s } @@ -424,7 +427,6 @@ var newClientConnection = func( runner.ReplaceWithClosed, s.queueControlFrame, s.config.ConnectionIDGenerator, - s.version, ) s.preSetup() s.ctx, s.ctxCancel = context.WithCancel(context.WithValue(context.Background(), ConnectionTracingKey, tracingID)) @@ -439,7 +441,6 @@ var newClientConnection = func( s.perspective, s.tracer, s.logger, - s.version, ) initialStream := newCryptoStream() handshakeStream := newCryptoStream() @@ -493,7 +494,7 @@ var newClientConnection = func( s.clientHelloWritten = clientHelloWritten s.cryptoStreamHandler = cs s.cryptoStreamManager = newCryptoStreamManager(cs, initialStream, handshakeStream, newCryptoStream()) - s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen, s.version) + s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen) s.packer = newPacketPacker( srcConnID, s.connIDManager.Get, @@ -511,7 +512,6 @@ var newClientConnection = func( s.receivedPacketHandler, s.datagramQueue, s.perspective, - s.version, ) if len(tlsConf.ServerName) > 0 { s.tokenStoreKey = tlsConf.ServerName @@ -528,8 +528,8 @@ var newClientConnection = func( func (s *connection) preSetup() { s.sendQueue = newSendQueue(s.conn) - s.retransmissionQueue = newRetransmissionQueue(s.version) - s.frameParser = wire.NewFrameParser(s.config.EnableDatagrams, s.version) + s.retransmissionQueue = newRetransmissionQueue() + s.frameParser = wire.NewFrameParser(s.config.EnableDatagrams) s.rttStats = &utils.RTTStats{} s.connFlowController = flowcontrol.NewConnectionFlowController( protocol.ByteCount(s.config.InitialConnectionReceiveWindow), @@ -551,9 +551,8 @@ func (s *connection) preSetup() { uint64(s.config.MaxIncomingStreams), uint64(s.config.MaxIncomingUniStreams), s.perspective, - s.version, ) - s.framer = newFramer(s.streamsMap, s.version) + s.framer = newFramer(s.streamsMap) s.receivedPackets = make(chan *receivedPacket, protocol.MaxConnUnprocessedPackets) s.closeChan = make(chan closeError, 1) s.sendingScheduled = make(chan struct{}, 1) @@ -565,6 +564,7 @@ func (s *connection) preSetup() { s.windowUpdateQueue = newWindowUpdateQueue(s.streamsMap, s.connFlowController, s.framer.QueueControlFrame) s.datagramQueue = newDatagramQueue(s.scheduleSending, s.logger) + s.connState.Version = s.version } // run the connection main loop @@ -758,11 +758,10 @@ func (s *connection) supportsDatagrams() bool { } func (s *connection) ConnectionState() ConnectionState { - return ConnectionState{ - TLS: s.cryptoStreamHandler.ConnectionState(), - SupportsDatagrams: s.supportsDatagrams(), - Version: s.version, - } + s.connStateMutex.Lock() + defer s.connStateMutex.Unlock() + s.connState.TLS = s.cryptoStreamHandler.ConnectionState() + return s.connState } // Time when the next keep-alive packet should be sent. 
@@ -916,7 +915,7 @@ func (s *connection) handlePacketImpl(rp *receivedPacket) bool { } if wire.IsLongHeaderPacket(p.data[0]) { - hdr, packetData, rest, err := wire.ParsePacket(p.data, s.srcConnIDLen) + hdr, packetData, rest, err := wire.ParsePacket(p.data) if err != nil { if s.tracer != nil { dropReason := logging.PacketDropHeaderParseError @@ -1049,7 +1048,7 @@ func (s *connection) handleLongHeaderPacket(p *receivedPacket, hdr *wire.Header) return false } - packet, err := s.unpacker.UnpackLongHeader(hdr, p.rcvTime, p.data) + packet, err := s.unpacker.UnpackLongHeader(hdr, p.rcvTime, p.data, s.version) if err != nil { wasQueued = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr)) return false @@ -1068,7 +1067,7 @@ func (s *connection) handleLongHeaderPacket(p *receivedPacket, hdr *wire.Header) return false } - if err := s.handleUnpackedPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil { + if err := s.handleUnpackedLongHeaderPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil { s.closeLocal(err) return false } @@ -1231,7 +1230,7 @@ func (s *connection) handleVersionNegotiationPacket(p *receivedPacket) { }) } -func (s *connection) handleUnpackedPacket( +func (s *connection) handleUnpackedLongHeaderPacket( packet *unpackedPacket, ecn protocol.ECN, rcvTime time.Time, @@ -1250,7 +1249,7 @@ func (s *connection) handleUnpackedPacket( s.tracer.NegotiatedVersion(s.version, clientVersions, serverVersions) } // The server can change the source connection ID with the first Handshake packet. - if s.perspective == protocol.PerspectiveClient && packet.hdr.IsLongHeader && packet.hdr.SrcConnectionID != s.handshakeDestConnID { + if s.perspective == protocol.PerspectiveClient && packet.hdr.SrcConnectionID != s.handshakeDestConnID { cid := packet.hdr.SrcConnectionID s.logger.Debugf("Received first packet. Switching destination connection ID to: %s", cid) s.handshakeDestConnID = cid @@ -1323,7 +1322,7 @@ func (s *connection) handleFrames( // If we're not tracing, this slice will always remain empty. var frames []wire.Frame for len(data) > 0 { - l, frame, err := s.frameParser.ParseNext(data, encLevel) + l, frame, err := s.frameParser.ParseNext(data, encLevel, s.version) if err != nil { return false, err } @@ -1716,6 +1715,9 @@ func (s *connection) restoreTransportParameters(params *wire.TransportParameters s.connIDGenerator.SetMaxActiveConnIDs(params.ActiveConnectionIDLimit) s.connFlowController.UpdateSendWindow(params.InitialMaxData) s.streamsMap.UpdateLimits(params) + s.connStateMutex.Lock() + s.connState.SupportsDatagrams = s.supportsDatagrams() + s.connStateMutex.Unlock() } func (s *connection) handleTransportParameters(params *wire.TransportParameters) { @@ -1734,6 +1736,10 @@ func (s *connection) handleTransportParameters(params *wire.TransportParameters) // the client's transport parameters. 
close(s.earlyConnReadyChan) } + + s.connStateMutex.Lock() + s.connState.SupportsDatagrams = s.supportsDatagrams() + s.connStateMutex.Unlock() } func (s *connection) checkTransportParameters(params *wire.TransportParameters) error { @@ -1858,43 +1864,40 @@ func (s *connection) sendPackets() error { func (s *connection) maybeSendAckOnlyPacket() error { if !s.handshakeConfirmed { - packet, err := s.packer.PackCoalescedPacket(true) + packet, err := s.packer.PackCoalescedPacket(true, s.version) if err != nil { return err } if packet == nil { return nil } - s.logCoalescedPacket(packet) - for _, p := range packet.packets { - s.sentPacketHandler.SentPacket(p.ToAckHandlerPacket(time.Now(), s.retransmissionQueue)) - } - s.connIDManager.SentPacket() - s.sendQueue.Send(packet.buffer) + s.sendPackedCoalescedPacket(packet, time.Now()) return nil } - packet, err := s.packer.PackPacket(true) + now := time.Now() + p, buffer, err := s.packer.PackPacket(true, now, s.version) if err != nil { + if err == errNothingToPack { + return nil + } return err } - if packet == nil { - return nil - } - s.sendPackedPacket(packet, time.Now()) + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buffer.Len(), false) + s.sendPackedShortHeaderPacket(buffer, p.Packet, now) return nil } func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel) error { // Queue probe packets until we actually send out a packet, // or until there are no more packets to queue. - var packet *packedPacket + var packet *coalescedPacket for { if wasQueued := s.sentPacketHandler.QueueProbePacket(encLevel); !wasQueued { break } var err error - packet, err = s.packer.MaybePackProbePacket(encLevel) + packet, err = s.packer.MaybePackProbePacket(encLevel, s.version) if err != nil { return err } @@ -1915,15 +1918,15 @@ func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel) error { panic("unexpected encryption level") } var err error - packet, err = s.packer.MaybePackProbePacket(encLevel) + packet, err = s.packer.MaybePackProbePacket(encLevel, s.version) if err != nil { return err } } - if packet == nil || packet.packetContents == nil { + if packet == nil || (len(packet.longHdrPackets) == 0 && packet.shortHdrPacket == nil) { return fmt.Errorf("connection BUG: couldn't pack %s probe packet", encLevel) } - s.sendPackedPacket(packet, time.Now()) + s.sendPackedCoalescedPacket(packet, time.Now()) return nil } @@ -1935,44 +1938,59 @@ func (s *connection) sendPacket() (bool, error) { now := time.Now() if !s.handshakeConfirmed { - packet, err := s.packer.PackCoalescedPacket(false) + packet, err := s.packer.PackCoalescedPacket(false, s.version) if err != nil || packet == nil { return false, err } s.sentFirstPacket = true - s.logCoalescedPacket(packet) - for _, p := range packet.packets { - if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() { - s.firstAckElicitingPacketAfterIdleSentTime = now - } - s.sentPacketHandler.SentPacket(p.ToAckHandlerPacket(now, s.retransmissionQueue)) - } - s.connIDManager.SentPacket() - s.sendQueue.Send(packet.buffer) + s.sendPackedCoalescedPacket(packet, now) return true, nil - } - if !s.config.DisablePathMTUDiscovery && s.mtuDiscoverer.ShouldSendProbe(now) { - packet, err := s.packer.PackMTUProbePacket(s.mtuDiscoverer.GetPing()) + } else if !s.config.DisablePathMTUDiscovery && s.mtuDiscoverer.ShouldSendProbe(now) { + ping, size := s.mtuDiscoverer.GetPing() + p, buffer, err := s.packer.PackMTUProbePacket(ping, size, now, s.version) if 
err != nil { return false, err } - s.sendPackedPacket(packet, now) + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buffer.Len(), false) + s.sendPackedShortHeaderPacket(buffer, p.Packet, now) return true, nil } - packet, err := s.packer.PackPacket(false) - if err != nil || packet == nil { + p, buffer, err := s.packer.PackPacket(false, now, s.version) + if err != nil { + if err == errNothingToPack { + return false, nil + } return false, err } - s.sendPackedPacket(packet, now) + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buffer.Len(), false) + s.sendPackedShortHeaderPacket(buffer, p.Packet, now) return true, nil } -func (s *connection) sendPackedPacket(packet *packedPacket, now time.Time) { - if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && packet.IsAckEliciting() { +func (s *connection) sendPackedShortHeaderPacket(buffer *packetBuffer, p *ackhandler.Packet, now time.Time) { + if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && ackhandler.HasAckElicitingFrames(p.Frames) { s.firstAckElicitingPacketAfterIdleSentTime = now } - s.logPacket(packet) - s.sentPacketHandler.SentPacket(packet.ToAckHandlerPacket(now, s.retransmissionQueue)) + + s.sentPacketHandler.SentPacket(p) + s.connIDManager.SentPacket() + s.sendQueue.Send(buffer) +} + +func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, now time.Time) { + s.logCoalescedPacket(packet) + for _, p := range packet.longHdrPackets { + if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() { + s.firstAckElicitingPacketAfterIdleSentTime = now + } + s.sentPacketHandler.SentPacket(p.ToAckHandlerPacket(now, s.retransmissionQueue)) + } + if p := packet.shortHdrPacket; p != nil { + if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() { + s.firstAckElicitingPacketAfterIdleSentTime = now + } + s.sentPacketHandler.SentPacket(p.Packet) + } s.connIDManager.SentPacket() s.sendQueue.Send(packet.buffer) } @@ -1983,14 +2001,14 @@ func (s *connection) sendConnectionClose(e error) ([]byte, error) { var transportErr *qerr.TransportError var applicationErr *qerr.ApplicationError if errors.As(e, &transportErr) { - packet, err = s.packer.PackConnectionClose(transportErr) + packet, err = s.packer.PackConnectionClose(transportErr, s.version) } else if errors.As(e, &applicationErr) { - packet, err = s.packer.PackApplicationClose(applicationErr) + packet, err = s.packer.PackApplicationClose(applicationErr, s.version) } else { packet, err = s.packer.PackConnectionClose(&qerr.TransportError{ ErrorCode: qerr.InternalError, ErrorMessage: fmt.Sprintf("connection BUG: unspecified error type (msg: %s)", e.Error()), - }) + }, s.version) } if err != nil { return nil, err @@ -1999,7 +2017,18 @@ func (s *connection) sendConnectionClose(e error) ([]byte, error) { return packet.buffer.Data, s.conn.Write(packet.buffer.Data) } -func (s *connection) logPacketContents(p *packetContents) { +func (s *connection) logLongHeaderPacket(p *longHeaderPacket) { + // quic-go logging + if s.logger.Debug() { + p.header.Log(s.logger) + if p.ack != nil { + wire.LogFrame(s.logger, p.ack, true) + } + for _, frame := range p.frames { + wire.LogFrame(s.logger, frame.Frame, true) + } + } + // tracing if s.tracer != nil { frames := make([]logging.Frame, 0, len(p.frames)) @@ -2010,40 +2039,72 @@ func (s *connection) logPacketContents(p *packetContents) { if p.ack != nil { ack = logutils.ConvertAckFrame(p.ack) } - 
s.tracer.SentPacket(p.header, p.length, ack, frames) + s.tracer.SentLongHeaderPacket(p.header, p.length, ack, frames) } +} - // quic-go logging - if !s.logger.Debug() { - return +func (s *connection) logShortHeaderPacket( + destConnID protocol.ConnectionID, + ackFrame *wire.AckFrame, + frames []*ackhandler.Frame, + pn protocol.PacketNumber, + pnLen protocol.PacketNumberLen, + kp protocol.KeyPhaseBit, + size protocol.ByteCount, + isCoalesced bool, +) { + if s.logger.Debug() && !isCoalesced { + s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT", pn, size, s.logID) } - p.header.Log(s.logger) - if p.ack != nil { - wire.LogFrame(s.logger, p.ack, true) + // quic-go logging + if s.logger.Debug() { + wire.LogShortHeader(s.logger, destConnID, pn, pnLen, kp) + if ackFrame != nil { + wire.LogFrame(s.logger, ackFrame, true) + } + for _, frame := range frames { + wire.LogFrame(s.logger, frame.Frame, true) + } } - for _, frame := range p.frames { - wire.LogFrame(s.logger, frame.Frame, true) + + // tracing + if s.tracer != nil { + fs := make([]logging.Frame, 0, len(frames)) + for _, f := range frames { + fs = append(fs, logutils.ConvertFrame(f.Frame)) + } + var ack *logging.AckFrame + if ackFrame != nil { + ack = logutils.ConvertAckFrame(ackFrame) + } + s.tracer.SentShortHeaderPacket( + &logging.ShortHeader{ + DestConnectionID: destConnID, + PacketNumber: pn, + PacketNumberLen: pnLen, + KeyPhase: kp, + }, + size, + ack, + fs, + ) } } func (s *connection) logCoalescedPacket(packet *coalescedPacket) { if s.logger.Debug() { - if len(packet.packets) > 1 { - s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.packets), packet.buffer.Len(), s.logID) + if len(packet.longHdrPackets) > 1 { + s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.longHdrPackets), packet.buffer.Len(), s.logID) } else { - s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.packets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.packets[0].EncryptionLevel()) + s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.longHdrPackets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.longHdrPackets[0].EncryptionLevel()) } } - for _, p := range packet.packets { - s.logPacketContents(p) + for _, p := range packet.longHdrPackets { + s.logLongHeaderPacket(p) } -} - -func (s *connection) logPacket(packet *packedPacket) { - if s.logger.Debug() { - s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.header.PacketNumber, packet.buffer.Len(), s.logID, packet.EncryptionLevel()) + if p := packet.shortHdrPacket; p != nil { + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, p.Length, true) } - s.logPacketContents(packet.packetContents) } // AcceptStream returns the next stream openend by the peer diff --git a/vendor/github.com/Psiphon-Labs/quic-go/errors.go b/vendor/github.com/Psiphon-Labs/quic-go/errors.go index 836b473a8..e0c21765b 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/errors.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/errors.go @@ -46,6 +46,7 @@ const ( type StreamError struct { StreamID StreamID ErrorCode StreamErrorCode + Remote bool } func (e *StreamError) Is(target error) bool { @@ -54,5 +55,9 @@ func (e *StreamError) Is(target error) bool { } func (e *StreamError) Error() string { - return fmt.Sprintf("stream %d canceled with error code %d", e.StreamID, 
e.ErrorCode) + pers := "local" + if e.Remote { + pers = "remote" + } + return fmt.Sprintf("stream %d canceled by %s with error code %d", e.StreamID, pers, e.ErrorCode) } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/frame_sorter.go b/vendor/github.com/Psiphon-Labs/quic-go/frame_sorter.go index a19cb5e4b..e86d05c5f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/frame_sorter.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/frame_sorter.go @@ -2,6 +2,7 @@ package quic import ( "errors" + "sync" "github.com/Psiphon-Labs/quic-go/internal/protocol" list "github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist" @@ -13,6 +14,12 @@ type byteInterval struct { End protocol.ByteCount } +var byteIntervalElementPool sync.Pool + +func init() { + byteIntervalElementPool = *list.NewPool[byteInterval]() +} + type frameSorterEntry struct { Data []byte DoneCb func() @@ -28,7 +35,7 @@ var errDuplicateStreamData = errors.New("duplicate stream data") func newFrameSorter() *frameSorter { s := frameSorter{ - gaps: list.New[byteInterval](), + gaps: list.NewWithPool[byteInterval](&byteIntervalElementPool), queue: make(map[protocol.ByteCount]frameSorterEntry), } s.gaps.PushFront(byteInterval{Start: 0, End: protocol.MaxByteCount}) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/framer.go b/vendor/github.com/Psiphon-Labs/quic-go/framer.go index d93ca0fce..872897b22 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/framer.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/framer.go @@ -14,10 +14,10 @@ type framer interface { HasData() bool QueueControlFrame(wire.Frame) - AppendControlFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) + AppendControlFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) AddActiveStream(protocol.StreamID) - AppendStreamFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) + AppendStreamFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) Handle0RTTRejection() error } @@ -26,7 +26,6 @@ type framerI struct { mutex sync.Mutex streamGetter streamGetter - version protocol.VersionNumber activeStreams map[protocol.StreamID]struct{} streamQueue []protocol.StreamID @@ -37,14 +36,10 @@ type framerI struct { var _ framer = &framerI{} -func newFramer( - streamGetter streamGetter, - v protocol.VersionNumber, -) framer { +func newFramer(streamGetter streamGetter) framer { return &framerI{ streamGetter: streamGetter, activeStreams: make(map[protocol.StreamID]struct{}), - version: v, } } @@ -67,16 +62,18 @@ func (f *framerI) QueueControlFrame(frame wire.Frame) { f.controlFrameMutex.Unlock() } -func (f *framerI) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) { +func (f *framerI) AppendControlFrames(frames []*ackhandler.Frame, maxLen protocol.ByteCount, v protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) { var length protocol.ByteCount f.controlFrameMutex.Lock() for len(f.controlFrames) > 0 { frame := f.controlFrames[len(f.controlFrames)-1] - frameLen := frame.Length(f.version) + frameLen := frame.Length(v) if length+frameLen > maxLen { break } - frames = append(frames, ackhandler.Frame{Frame: frame}) + af := ackhandler.GetFrame() + af.Frame = frame + frames = append(frames, af) length += frameLen f.controlFrames = f.controlFrames[:len(f.controlFrames)-1] } @@ -93,7 +90,7 @@ func (f *framerI) AddActiveStream(id 
protocol.StreamID) { f.mutex.Unlock() } -func (f *framerI) AppendStreamFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) { +func (f *framerI) AppendStreamFrames(frames []*ackhandler.Frame, maxLen protocol.ByteCount, v protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) { var length protocol.ByteCount var lastFrame *ackhandler.Frame f.mutex.Lock() @@ -118,7 +115,7 @@ func (f *framerI) AppendStreamFrames(frames []ackhandler.Frame, maxLen protocol. // Therefore, we can pretend to have more bytes available when popping // the STREAM frame (which will always have the DataLen set). remainingLen += quicvarint.Len(uint64(remainingLen)) - frame, hasMoreData := str.popStreamFrame(remainingLen) + frame, hasMoreData := str.popStreamFrame(remainingLen, v) if hasMoreData { // put the stream back in the queue (at the end) f.streamQueue = append(f.streamQueue, id) } else { // no more data to send. Stream is not active any more @@ -130,16 +127,16 @@ func (f *framerI) AppendStreamFrames(frames []ackhandler.Frame, maxLen protocol. if frame == nil { continue } - frames = append(frames, *frame) - length += frame.Length(f.version) + frames = append(frames, frame) + length += frame.Length(v) lastFrame = frame } f.mutex.Unlock() if lastFrame != nil { - lastFrameLen := lastFrame.Length(f.version) + lastFrameLen := lastFrame.Length(v) // account for the smaller size of the last STREAM frame lastFrame.Frame.(*wire.StreamFrame).DataLenPresent = false - length += lastFrame.Length(f.version) - lastFrameLen + length += lastFrame.Length(v) - lastFrameLen } return frames, length } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/client.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/client.go index d3b20de5b..736161e5e 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/http3/client.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/client.go @@ -16,7 +16,8 @@ import ( "github.com/Psiphon-Labs/quic-go/internal/qtls" "github.com/Psiphon-Labs/quic-go/internal/utils" "github.com/Psiphon-Labs/quic-go/quicvarint" - "github.com/marten-seemann/qpack" + + "github.com/quic-go/qpack" ) // MethodGet0RTT allows a GET request to be sent using 0-RTT. @@ -72,7 +73,9 @@ type client struct { logger utils.Logger } -func newClient(hostname string, tlsConf *tls.Config, opts *roundTripperOpts, conf *quic.Config, dialer dialFunc) (*client, error) { +var _ roundTripCloser = &client{} + +func newClient(hostname string, tlsConf *tls.Config, opts *roundTripperOpts, conf *quic.Config, dialer dialFunc) (roundTripCloser, error) { if conf == nil { conf = defaultQuicConfig.Clone() } else if len(conf.Versions) == 0 { @@ -433,7 +436,7 @@ func (c *client) doRequest(req *http.Request, str quic.Stream, opt RoundTripOpt, // Rules for when to set Content-Length are defined in https://tools.ietf.org/html/rfc7230#section-3.3.2. 
_, hasTransferEncoding := res.Header["Transfer-Encoding"] isInformational := res.StatusCode >= 100 && res.StatusCode < 200 - isNoContent := res.StatusCode == 204 + isNoContent := res.StatusCode == http.StatusNoContent isSuccessfulConnect := req.Method == http.MethodConnect && res.StatusCode >= 200 && res.StatusCode < 300 if !hasTransferEncoding && !isInformational && !isNoContent && !isSuccessfulConnect { res.ContentLength = -1 @@ -456,3 +459,15 @@ func (c *client) doRequest(req *http.Request, str quic.Stream, opt RoundTripOpt, return res, requestError{} } + +func (c *client) HandshakeComplete() bool { + if c.conn == nil { + return false + } + select { + case <-c.conn.HandshakeComplete().Done(): + return true + default: + return false + } +} diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/request.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/request.go index 0b9a7278c..9af25a570 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/http3/request.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/request.go @@ -1,14 +1,13 @@ package http3 import ( - "crypto/tls" "errors" "net/http" "net/url" "strconv" "strings" - "github.com/marten-seemann/qpack" + "github.com/quic-go/qpack" ) func requestFromHeaders(headers []qpack.HeaderField) (*http.Request, error) { @@ -101,7 +100,6 @@ func requestFromHeaders(headers []qpack.HeaderField) (*http.Request, error) { ContentLength: contentLength, Host: authority, RequestURI: requestURI, - TLS: &tls.ConnectionState{}, }, nil } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/request_writer.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/request_writer.go index b219c577f..bf3e3aec0 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/http3/request_writer.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/request_writer.go @@ -10,12 +10,13 @@ import ( "strings" "sync" - "github.com/Psiphon-Labs/quic-go" - "github.com/Psiphon-Labs/quic-go/internal/utils" - "github.com/marten-seemann/qpack" "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" + + "github.com/Psiphon-Labs/quic-go" + "github.com/Psiphon-Labs/quic-go/internal/utils" + "github.com/quic-go/qpack" ) const bodyCopyBufferSize = 8 * 1024 diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/response_writer.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/response_writer.go index 2501fe022..e5c641f51 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/http3/response_writer.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/response_writer.go @@ -9,7 +9,8 @@ import ( "github.com/Psiphon-Labs/quic-go" "github.com/Psiphon-Labs/quic-go/internal/utils" - "github.com/marten-seemann/qpack" + + "github.com/quic-go/qpack" ) type responseWriter struct { @@ -80,7 +81,7 @@ func (w *responseWriter) WriteHeader(status int) { func (w *responseWriter) Write(p []byte) (int, error) { if !w.headerWritten { - w.WriteHeader(200) + w.WriteHeader(http.StatusOK) } if !bodyAllowedForStatus(w.status) { return 0, http.ErrBodyNotAllowed @@ -111,9 +112,9 @@ func bodyAllowedForStatus(status int) bool { switch { case status >= 100 && status <= 199: return false - case status == 204: + case status == http.StatusNoContent: return false - case status == 304: + case status == http.StatusNotModified: return false } return true diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/roundtrip.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/roundtrip.go index e624a0779..a5ce0490d 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/http3/roundtrip.go +++ 
b/vendor/github.com/Psiphon-Labs/quic-go/http3/roundtrip.go @@ -6,17 +6,19 @@ import ( "errors" "fmt" "io" + "net" "net/http" "strings" "sync" - "github.com/Psiphon-Labs/quic-go" - "golang.org/x/net/http/httpguts" + + "github.com/Psiphon-Labs/quic-go" ) type roundTripCloser interface { RoundTripOpt(*http.Request, RoundTripOpt) (*http.Response, error) + HandshakeComplete() bool io.Closer } @@ -75,7 +77,8 @@ type RoundTripper struct { // Zero means to use a default limit. MaxResponseHeaderBytes int64 - clients map[string]roundTripCloser + newClient func(hostname string, tlsConf *tls.Config, opts *roundTripperOpts, conf *quic.Config, dialer dialFunc) (roundTripCloser, error) // so we can mock it in tests + clients map[string]roundTripCloser } // RoundTripOpt are options for the Transport.RoundTripOpt method. @@ -110,22 +113,20 @@ func (r *RoundTripper) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http. closeRequestBody(req) return nil, errors.New("http3: nil Request.Header") } - - if req.URL.Scheme == "https" { - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("http3: invalid http header field name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("http3: invalid http header field value %q for key %v", v, k) - } - } - } - } else { + if req.URL.Scheme != "https" { closeRequestBody(req) return nil, fmt.Errorf("http3: unsupported protocol scheme: %s", req.URL.Scheme) } + for k, vv := range req.Header { + if !httpguts.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("http3: invalid http header field name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("http3: invalid http header field value %q for key %v", v, k) + } + } + } if req.Method != "" && !validMethod(req.Method) { closeRequestBody(req) @@ -133,11 +134,20 @@ func (r *RoundTripper) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http. } hostname := authorityAddr("https", hostnameFromRequest(req)) - cl, err := r.getClient(hostname, opt.OnlyCachedConn) + cl, isReused, err := r.getClient(hostname, opt.OnlyCachedConn) if err != nil { return nil, err } - return cl.RoundTripOpt(req, opt) + rsp, err := cl.RoundTripOpt(req, opt) + if err != nil { + r.removeClient(hostname) + if isReused { + if nerr, ok := err.(net.Error); ok && nerr.Timeout() { + return r.RoundTripOpt(req, opt) + } + } + } + return rsp, err } // RoundTrip does a round trip. 
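// A minimal sketch of the retry shape introduced in RoundTripOpt above,
// using a hypothetical attempt() stand-in for cl.RoundTripOpt: when a request
// fails with a net.Error timeout on a connection that was reused after a
// completed handshake, the cached client is dropped and the request is
// retried exactly once on a fresh connection.
package main

import (
	"fmt"
	"net"
)

// attempt is a hypothetical stand-in for a single request on one client.
func attempt(fresh bool) error {
	if !fresh {
		// Simulate an idle, reused connection timing out.
		return &net.DNSError{Err: "timeout", IsTimeout: true}
	}
	return nil
}

func roundTrip(isReused bool) error {
	err := attempt(false)
	if err != nil && isReused {
		if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
			// removeClient(hostname) would run here; then retry once.
			return attempt(true)
		}
	}
	return err
}

func main() {
	// First attempt times out on the reused connection; the retry succeeds.
	fmt.Println(roundTrip(true))
}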
@@ -145,7 +155,7 @@ func (r *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { return r.RoundTripOpt(req, RoundTripOpt{}) } -func (r *RoundTripper) getClient(hostname string, onlyCached bool) (roundTripCloser, error) { +func (r *RoundTripper) getClient(hostname string, onlyCached bool) (rtc roundTripCloser, isReused bool, err error) { r.mutex.Lock() defer r.mutex.Unlock() @@ -156,10 +166,14 @@ func (r *RoundTripper) getClient(hostname string, onlyCached bool) (roundTripClo client, ok := r.clients[hostname] if !ok { if onlyCached { - return nil, ErrNoCachedConn + return nil, false, ErrNoCachedConn } var err error - client, err = newClient( + newCl := newClient + if r.newClient != nil { + newCl = r.newClient + } + client, err = newCl( hostname, r.TLSClientConfig, &roundTripperOpts{ @@ -173,11 +187,22 @@ func (r *RoundTripper) getClient(hostname string, onlyCached bool) (roundTripClo r.Dial, ) if err != nil { - return nil, err + return nil, false, err } r.clients[hostname] = client + } else if client.HandshakeComplete() { + isReused = true + } + return client, isReused, nil +} + +func (r *RoundTripper) removeClient(hostname string) { + r.mutex.Lock() + defer r.mutex.Unlock() + if r.clients == nil { + return } - return client, nil + delete(r.clients, hostname) } // Close closes the QUIC connections that this RoundTripper has used diff --git a/vendor/github.com/Psiphon-Labs/quic-go/http3/server.go b/vendor/github.com/Psiphon-Labs/quic-go/http3/server.go index 553b7eba1..26b5675fc 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/http3/server.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/http3/server.go @@ -18,7 +18,8 @@ import ( "github.com/Psiphon-Labs/quic-go/internal/protocol" "github.com/Psiphon-Labs/quic-go/internal/utils" "github.com/Psiphon-Labs/quic-go/quicvarint" - "github.com/marten-seemann/qpack" + + "github.com/quic-go/qpack" ) // allows mocking of quic.Listen and quic.ListenAddr @@ -272,7 +273,7 @@ func (s *Server) serveConn(tlsConf *tls.Config, conn net.PacketConn) error { baseConf := ConfigureTLSConfig(tlsConf) quicConf := s.QuicConfig if quicConf == nil { - quicConf = &quic.Config{} + quicConf = &quic.Config{Allow0RTT: func(net.Addr) bool { return true }} } else { quicConf = s.QuicConfig.Clone() } @@ -570,6 +571,8 @@ func (s *Server) handleRequest(conn quic.Connection, str quic.Stream, decoder *q return newStreamError(errorGeneralProtocolError, err) } + connState := conn.ConnectionState().TLS.ConnectionState + req.TLS = &connState req.RemoteAddr = conn.RemoteAddr().String() body := newRequestBody(newStream(str, onFrameError)) req.Body = body @@ -614,9 +617,9 @@ func (s *Server) handleRequest(conn quic.Connection, str quic.Stream, decoder *q } if panicked { - r.WriteHeader(500) + r.WriteHeader(http.StatusInternalServerError) } else { - r.WriteHeader(200) + r.WriteHeader(http.StatusOK) } // If the EOF was read by the handler, CancelRead() is a no-op. 
str.CancelRead(quic.StreamErrorCode(errorNoError)) @@ -717,19 +720,6 @@ func ListenAndServe(addr, certFile, keyFile string, handler http.Handler) error } defer udpConn.Close() - tcpAddr, err := net.ResolveTCPAddr("tcp", addr) - if err != nil { - return err - } - tcpConn, err := net.ListenTCP("tcp", tcpAddr) - if err != nil { - return err - } - defer tcpConn.Close() - - tlsConn := tls.NewListener(tcpConn, config) - defer tlsConn.Close() - if handler == nil { handler = http.DefaultServeMux } @@ -738,17 +728,14 @@ func ListenAndServe(addr, certFile, keyFile string, handler http.Handler) error TLSConfig: config, Handler: handler, } - httpServer := &http.Server{ - Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - quicServer.SetQuicHeaders(w.Header()) - handler.ServeHTTP(w, r) - }), - } hErr := make(chan error) qErr := make(chan error) go func() { - hErr <- httpServer.Serve(tlsConn) + hErr <- http.ListenAndServeTLS(addr, certFile, keyFile, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + quicServer.SetQuicHeaders(w.Header()) + handler.ServeHTTP(w, r) + })) }() go func() { qErr <- quicServer.Serve(udpConn) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/interface.go b/vendor/github.com/Psiphon-Labs/quic-go/interface.go index e97f352a2..69a02c45c 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/interface.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/interface.go @@ -177,7 +177,6 @@ type Connection interface { // Context returns a context that is cancelled when the connection is closed. Context() context.Context // ConnectionState returns basic details about the QUIC connection. - // It blocks until the handshake completes. // Warning: This API should not be considered stable and might change soon. ConnectionState() ConnectionState @@ -326,6 +325,11 @@ type Config struct { // This can be useful if version information is exchanged out-of-band. // It has no effect for a client. DisableVersionNegotiationPackets bool + // Allow0RTT allows the application to decide if a 0-RTT connection attempt should be accepted. + // When set, 0-RTT is enabled. When not set, 0-RTT is disabled. + // Only valid for the server. + // Warning: This API should not be considered stable and might change soon. + Allow0RTT func(net.Addr) bool // Enable QUIC datagram support (RFC 9221). EnableDatagrams bool Tracer logging.Tracer diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/ack_eliciting.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/ack_eliciting.go index 3926e3e70..db8bf9005 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/ack_eliciting.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/ack_eliciting.go @@ -10,7 +10,7 @@ func IsFrameAckEliciting(f wire.Frame) bool { } // HasAckElicitingFrames returns true if at least one frame is ack-eliciting. 
-func HasAckElicitingFrames(fs []Frame) bool { +func HasAckElicitingFrames(fs []*Frame) bool { for _, f := range fs { if IsFrameAckEliciting(f.Frame) { return true diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/ackhandler.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/ackhandler.go index 1f3cd2633..9831bbadc 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/ackhandler.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/ackhandler.go @@ -17,8 +17,7 @@ func NewAckHandler( pers protocol.Perspective, tracer logging.ConnectionTracer, logger utils.Logger, - version protocol.VersionNumber, ) (SentPacketHandler, ReceivedPacketHandler) { sph := newSentPacketHandler(initialPacketNumber, initialMaxDatagramSize, rttStats, clientAddressValidated, pers, tracer, logger) - return sph, newReceivedPacketHandler(sph, rttStats, logger, version) + return sph, newReceivedPacketHandler(sph, rttStats, logger) } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/frame.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/frame.go index c6a9db2b7..ebbf187f5 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/frame.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/frame.go @@ -1,9 +1,29 @@ package ackhandler -import "github.com/Psiphon-Labs/quic-go/internal/wire" +import ( + "sync" + + "github.com/Psiphon-Labs/quic-go/internal/wire" +) type Frame struct { wire.Frame // nil if the frame has already been acknowledged in another packet OnLost func(wire.Frame) OnAcked func(wire.Frame) } + +var framePool = sync.Pool{New: func() any { return &Frame{} }} + +func GetFrame() *Frame { + f := framePool.Get().(*Frame) + f.OnLost = nil + f.OnAcked = nil + return f +} + +func putFrame(f *Frame) { + f.Frame = nil + f.OnLost = nil + f.OnAcked = nil + framePool.Put(f) +} diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/packet.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/packet.go index 5311ad86a..fc2ceb144 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/packet.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/packet.go @@ -10,7 +10,7 @@ import ( // A Packet is a packet type Packet struct { PacketNumber protocol.PacketNumber - Frames []Frame + Frames []*Frame LargestAcked protocol.PacketNumber // InvalidPacketNumber if the packet doesn't contain an ACK Length protocol.ByteCount EncryptionLevel protocol.EncryptionLevel @@ -46,4 +46,10 @@ func GetPacket() *Packet { // We currently only return Packets back into the pool when they're acknowledged (not when they're lost). // This simplifies the code, and gives the vast majority of the performance benefit we can gain from using the pool. 
-func putPacket(p *Packet) { packetPool.Put(p) } +func putPacket(p *Packet) { + for _, f := range p.Frames { + putFrame(f) + } + p.Frames = nil + packetPool.Put(p) +} diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_handler.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_handler.go index 142af2aa3..81ef1deaf 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_handler.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_handler.go @@ -25,13 +25,12 @@ func newReceivedPacketHandler( sentPackets sentPacketTracker, rttStats *utils.RTTStats, logger utils.Logger, - version protocol.VersionNumber, ) ReceivedPacketHandler { return &receivedPacketHandler{ sentPackets: sentPackets, - initialPackets: newReceivedPacketTracker(rttStats, logger, version), - handshakePackets: newReceivedPacketTracker(rttStats, logger, version), - appDataPackets: newReceivedPacketTracker(rttStats, logger, version), + initialPackets: newReceivedPacketTracker(rttStats, logger), + handshakePackets: newReceivedPacketTracker(rttStats, logger), + appDataPackets: newReceivedPacketTracker(rttStats, logger), lowest1RTTPacket: protocol.InvalidPacketNumber, } } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_history.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_history.go index 1e9726117..fb5c7c2aa 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_history.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_history.go @@ -1,6 +1,8 @@ package ackhandler import ( + "sync" + "github.com/Psiphon-Labs/quic-go/internal/protocol" list "github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist" "github.com/Psiphon-Labs/quic-go/internal/wire" @@ -12,6 +14,12 @@ type interval struct { End protocol.PacketNumber } +var intervalElementPool sync.Pool + +func init() { + intervalElementPool = *list.NewPool[interval]() +} + // The receivedPacketHistory stores if a packet number has already been received. // It generates ACK ranges which can be used to assemble an ACK frame. // It does not store packet contents. 
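// A minimal sketch of the element-pooling pattern used above, with
// hypothetical stand-ins for list.NewPool / list.NewWithPool: a package-level
// sync.Pool hands out reusable list elements so hot-path entries (byte
// intervals, ACK ranges, and similar) avoid allocating on every insert and
// are returned to the pool on removal.
package main

import (
	"fmt"
	"sync"
)

type interval struct{ Start, End int }

type element[T any] struct{ Value T }

// newElementPool mirrors the shape of list.NewPool[T]() from the patch.
func newElementPool[T any]() *sync.Pool {
	return &sync.Pool{New: func() any { return new(element[T]) }}
}

// Shared by every history instance, as with intervalElementPool above.
var intervalElementPool = newElementPool[interval]()

func main() {
	e := intervalElementPool.Get().(*element[interval])
	e.Value = interval{Start: 0, End: 100}
	fmt.Println(e.Value)
	// On removal the element is reset and handed back for reuse.
	e.Value = interval{}
	intervalElementPool.Put(e)
}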
@@ -23,7 +31,7 @@ type receivedPacketHistory struct { func newReceivedPacketHistory() *receivedPacketHistory { return &receivedPacketHistory{ - ranges: list.New[interval](), + ranges: list.NewWithPool[interval](&intervalElementPool), } } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_tracker.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_tracker.go index 770bc5baf..843356032 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_tracker.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_tracker.go @@ -31,21 +31,17 @@ type receivedPacketTracker struct { lastAck *wire.AckFrame logger utils.Logger - - version protocol.VersionNumber } func newReceivedPacketTracker( rttStats *utils.RTTStats, logger utils.Logger, - version protocol.VersionNumber, ) *receivedPacketTracker { return &receivedPacketTracker{ packetHistory: newReceivedPacketHistory(), maxAckDelay: protocol.MaxAckDelay, rttStats: rttStats, logger: logger, - version: version, } } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_history.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_history.go index 73f50af9e..f961dc465 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_history.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_history.go @@ -2,6 +2,7 @@ package ackhandler import ( "fmt" + "sync" "time" "github.com/Psiphon-Labs/quic-go/internal/protocol" @@ -17,11 +18,17 @@ type sentPacketHistory struct { highestSent protocol.PacketNumber } +var packetElementPool sync.Pool + +func init() { + packetElementPool = *list.NewPool[*Packet]() +} + func newSentPacketHistory(rttStats *utils.RTTStats) *sentPacketHistory { return &sentPacketHistory{ rttStats: rttStats, - outstandingPacketList: list.New[*Packet](), - etcPacketList: list.New[*Packet](), + outstandingPacketList: list.NewWithPool[*Packet](&packetElementPool), + etcPacketList: list.NewWithPool[*Packet](&packetElementPool), packetMap: make(map[protocol.PacketNumber]*list.Element[*Packet]), highestSent: protocol.InvalidPacketNumber, } @@ -108,8 +115,7 @@ func (h *sentPacketHistory) Remove(p protocol.PacketNumber) error { if !ok { return fmt.Errorf("packet %d not found in sent packet history", p) } - h.outstandingPacketList.Remove(el) - h.etcPacketList.Remove(el) + el.List().Remove(el) delete(h.packetMap, p) return nil } @@ -139,10 +145,7 @@ func (h *sentPacketHistory) DeclareLost(p *Packet) *Packet { if !ok { return nil } - // try to remove it from both lists, as we don't know which one it currently belongs to. - // Remove is a no-op for elements that are not in the list. 
- h.outstandingPacketList.Remove(el) - h.etcPacketList.Remove(el) + el.List().Remove(el) p.declaredLost = true // move it to the correct position in the etc list (based on the packet number) for el = h.etcPacketList.Back(); el != nil; el = el.Prev() { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/crypto_setup.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/crypto_setup.go index 25effda89..09dda6fc1 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/crypto_setup.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/crypto_setup.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "math" "net" "sync" "time" @@ -116,6 +117,7 @@ type cryptoSetup struct { clientHelloWritten bool clientHelloWrittenChan chan struct{} // is closed as soon as the ClientHello is written zeroRTTParametersChan chan<- *wire.TransportParameters + allow0RTT func() bool rttStats *utils.RTTStats @@ -216,7 +218,7 @@ func NewCryptoSetupServer( tp *wire.TransportParameters, runner handshakeRunner, tlsConf *tls.Config, - enable0RTT bool, + allow0RTT func() bool, rttStats *utils.RTTStats, tracer logging.ConnectionTracer, logger utils.Logger, @@ -229,7 +231,7 @@ func NewCryptoSetupServer( tp, runner, tlsConf, - enable0RTT, + allow0RTT != nil, rttStats, tracer, logger, @@ -239,6 +241,7 @@ func NewCryptoSetupServer( // [Psiphon] nil, ) + cs.allow0RTT = allow0RTT cs.conn = qtls.Server(newConn(localAddr, remoteAddr, version), cs.tlsConf, cs.extraConf) return cs } @@ -292,7 +295,7 @@ func newCryptoSetup( } var maxEarlyData uint32 if enable0RTT { - maxEarlyData = 0xffffffff + maxEarlyData = math.MaxUint32 } cs.extraConf = &qtls.ExtraConfig{ GetExtensions: extHandler.GetExtensions, @@ -515,13 +518,17 @@ func (h *cryptoSetup) accept0RTT(sessionTicketData []byte) bool { return false } valid := h.ourParams.ValidFor0RTT(t.Parameters) - if valid { - h.logger.Debugf("Accepting 0-RTT. Restoring RTT from session ticket: %s", t.RTT) - h.rttStats.SetInitialRTT(t.RTT) - } else { + if !valid { h.logger.Debugf("Transport parameters changed. Rejecting 0-RTT.") + return false + } + if !h.allow0RTT() { + h.logger.Debugf("0-RTT not allowed. Rejecting 0-RTT.") + return false } - return valid + h.logger.Debugf("Accepting 0-RTT. Restoring RTT from session ticket: %s", t.RTT) + h.rttStats.SetInitialRTT(t.RTT) + return true } // rejected0RTT is called for the client when the server rejects 0-RTT. 
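// A minimal sketch of wiring the Config.Allow0RTT hook (added in interface.go
// earlier in this patch), assuming the vendored module path used throughout:
// accept0RTT() above now rejects a session ticket unless the transport
// parameters still validate for 0-RTT and this callback returns true for the
// client's address.
package main

import (
	"net"

	quic "github.com/Psiphon-Labs/quic-go"
)

func serverQUICConfig() *quic.Config {
	return &quic.Config{
		// Accept 0-RTT only from loopback clients; returning false rejects
		// the early data while the normal handshake still completes.
		Allow0RTT: func(addr net.Addr) bool {
			host, _, err := net.SplitHostPort(addr.String())
			if err != nil {
				return false
			}
			ip := net.ParseIP(host)
			return ip != nil && ip.IsLoopback()
		},
	}
}

func main() { _ = serverQUICConfig() }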
@@ -600,7 +607,9 @@ func (h *cryptoSetup) SetReadKey(encLevel qtls.EncryptionLevel, suite *qtls.Ciph newHeaderProtector(suite, trafficSecret, true, h.version), ) h.mutex.Unlock() - h.logger.Debugf("Installed 0-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + if h.logger.Debug() { + h.logger.Debugf("Installed 0-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + } if h.tracer != nil { h.tracer.UpdatedKeyFromTLS(protocol.Encryption0RTT, h.perspective.Opposite()) } @@ -613,12 +622,16 @@ func (h *cryptoSetup) SetReadKey(encLevel qtls.EncryptionLevel, suite *qtls.Ciph h.dropInitialKeys, h.perspective, ) - h.logger.Debugf("Installed Handshake Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + if h.logger.Debug() { + h.logger.Debugf("Installed Handshake Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + } case qtls.EncryptionApplication: h.readEncLevel = protocol.Encryption1RTT h.aead.SetReadKey(suite, trafficSecret) h.has1RTTOpener = true - h.logger.Debugf("Installed 1-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + if h.logger.Debug() { + h.logger.Debugf("Installed 1-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + } default: panic("unexpected read encryption level") } @@ -640,7 +653,9 @@ func (h *cryptoSetup) SetWriteKey(encLevel qtls.EncryptionLevel, suite *qtls.Cip newHeaderProtector(suite, trafficSecret, true, h.version), ) h.mutex.Unlock() - h.logger.Debugf("Installed 0-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + if h.logger.Debug() { + h.logger.Debugf("Installed 0-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + } if h.tracer != nil { h.tracer.UpdatedKeyFromTLS(protocol.Encryption0RTT, h.perspective) } @@ -653,12 +668,16 @@ func (h *cryptoSetup) SetWriteKey(encLevel qtls.EncryptionLevel, suite *qtls.Cip h.dropInitialKeys, h.perspective, ) - h.logger.Debugf("Installed Handshake Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + if h.logger.Debug() { + h.logger.Debugf("Installed Handshake Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + } case qtls.EncryptionApplication: h.writeEncLevel = protocol.Encryption1RTT h.aead.SetWriteKey(suite, trafficSecret) h.has1RTTSealer = true - h.logger.Debugf("Installed 1-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + if h.logger.Debug() { + h.logger.Debugf("Installed 1-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + } if h.zeroRTTSealer != nil { h.zeroRTTSealer = nil h.logger.Debugf("Dropping 0-RTT keys.") diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/initial_aead.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/initial_aead.go index 2fa9810c3..3509fa80b 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/initial_aead.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/initial_aead.go @@ -13,7 +13,7 @@ import ( var ( quicSaltOld = []byte{0xaf, 0xbf, 0xec, 0x28, 0x99, 0x93, 0xd2, 0x4c, 0x9e, 0x97, 0x86, 0xf1, 0x9c, 0x61, 0x11, 0xe0, 0x43, 0x90, 0xa8, 0x99} quicSaltV1 = []byte{0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, 0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a} - quicSaltV2 = []byte{0xa7, 0x07, 0xc2, 0x03, 0xa5, 0x9b, 0x47, 0x18, 0x4a, 0x1d, 0x62, 0xca, 0x57, 0x04, 0x06, 0xea, 0x7a, 0xe3, 0xe5, 0xd3} + quicSaltV2 = []byte{0x0d, 0xed, 0xe3, 0xde, 0xf7, 0x00, 0xa6, 0xdb, 0x81, 0x93, 0x81, 0xbe, 0x6e, 0x26, 0x9d, 0xcb, 0xf9, 0xbd, 0x2e, 0xd9} ) const ( diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/retry.go 
b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/retry.go index f7cf9d18a..edf185d64 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/retry.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/retry.go @@ -11,13 +11,15 @@ import ( ) var ( - oldRetryAEAD cipher.AEAD // used for QUIC draft versions up to 34 - retryAEAD cipher.AEAD // used for QUIC draft-34 + retryAEADdraft29 cipher.AEAD // used for QUIC draft versions up to 34 + retryAEADv1 cipher.AEAD // used for QUIC v1 (RFC 9000) + retryAEADv2 cipher.AEAD // used for QUIC v2 ) func init() { - oldRetryAEAD = initAEAD([16]byte{0xcc, 0xce, 0x18, 0x7e, 0xd0, 0x9a, 0x09, 0xd0, 0x57, 0x28, 0x15, 0x5a, 0x6c, 0xb9, 0x6b, 0xe1}) - retryAEAD = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e}) + retryAEADdraft29 = initAEAD([16]byte{0xcc, 0xce, 0x18, 0x7e, 0xd0, 0x9a, 0x09, 0xd0, 0x57, 0x28, 0x15, 0x5a, 0x6c, 0xb9, 0x6b, 0xe1}) + retryAEADv1 = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e}) + retryAEADv2 = initAEAD([16]byte{0x8f, 0xb4, 0xb0, 0x1b, 0x56, 0xac, 0x48, 0xe2, 0x60, 0xfb, 0xcb, 0xce, 0xad, 0x7c, 0xcc, 0x92}) } func initAEAD(key [16]byte) cipher.AEAD { @@ -33,30 +35,36 @@ func initAEAD(key [16]byte) cipher.AEAD { } var ( - retryBuf bytes.Buffer - retryMutex sync.Mutex - oldRetryNonce = [12]byte{0xe5, 0x49, 0x30, 0xf9, 0x7f, 0x21, 0x36, 0xf0, 0x53, 0x0a, 0x8c, 0x1c} - retryNonce = [12]byte{0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb} + retryBuf bytes.Buffer + retryMutex sync.Mutex + retryNonceDraft29 = [12]byte{0xe5, 0x49, 0x30, 0xf9, 0x7f, 0x21, 0x36, 0xf0, 0x53, 0x0a, 0x8c, 0x1c} + retryNonceV1 = [12]byte{0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb} + retryNonceV2 = [12]byte{0xd8, 0x69, 0x69, 0xbc, 0x2d, 0x7c, 0x6d, 0x99, 0x90, 0xef, 0xb0, 0x4a} ) // GetRetryIntegrityTag calculates the integrity tag on a Retry packet func GetRetryIntegrityTag(retry []byte, origDestConnID protocol.ConnectionID, version protocol.VersionNumber) *[16]byte { retryMutex.Lock() + defer retryMutex.Unlock() + retryBuf.WriteByte(uint8(origDestConnID.Len())) retryBuf.Write(origDestConnID.Bytes()) retryBuf.Write(retry) + defer retryBuf.Reset() var tag [16]byte var sealed []byte - if version != protocol.Version1 { - sealed = oldRetryAEAD.Seal(tag[:0], oldRetryNonce[:], nil, retryBuf.Bytes()) - } else { - sealed = retryAEAD.Seal(tag[:0], retryNonce[:], nil, retryBuf.Bytes()) + //nolint:exhaustive // These are all the versions we support + switch version { + case protocol.Version1: + sealed = retryAEADv1.Seal(tag[:0], retryNonceV1[:], nil, retryBuf.Bytes()) + case protocol.Version2: + sealed = retryAEADv2.Seal(tag[:0], retryNonceV2[:], nil, retryBuf.Bytes()) + default: + sealed = retryAEADdraft29.Seal(tag[:0], retryNonceDraft29[:], nil, retryBuf.Bytes()) } if len(sealed) != 16 { panic(fmt.Sprintf("unexpected Retry integrity tag length: %d", len(sealed))) } - retryBuf.Reset() - retryMutex.Unlock() return &tag } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/version.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/version.go index dd54dbd3c..2ae7a1154 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/version.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/version.go @@ -23,7 +23,7 @@ const ( VersionUnknown VersionNumber = math.MaxUint32 VersionDraft29 VersionNumber = 
0xff00001d Version1 VersionNumber = 0x1 - Version2 VersionNumber = 0x709a50c4 + Version2 VersionNumber = 0x6b3343cf ) // SupportedVersions lists the versions that the server supports diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go121.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go121.go new file mode 100644 index 000000000..b33406397 --- /dev/null +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go121.go @@ -0,0 +1,5 @@ +//go:build go1.21 + +package qtls + +var _ int = "The version of quic-go you're using can't be built on Go 1.21 yet. For more details, please see https://github.com/quic-go/quic-go/wiki/quic-go-and-Go-versions." diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go_oldversion.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go_oldversion.go index 83418e9e8..f433b328c 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go_oldversion.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go_oldversion.go @@ -2,4 +2,4 @@ package qtls -var _ int = "The version of quic-go you're using can't be built using outdated Go versions. For more details, please see https://github.com/lucas-clemente/quic-go/wiki/quic-go-and-Go-versions." +var _ int = "The version of quic-go you're using can't be built using outdated Go versions. For more details, please see https://github.com/quic-go/quic-go/wiki/quic-go-and-Go-versions." diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist/README.md b/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist/README.md index 2915d6013..66482f4fb 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist/README.md +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist/README.md @@ -1,4 +1,6 @@ # Usage This is the Go standard library implementation of a linked list -(https://golang.org/src/container/list/list.go), modified to use Go generics. +(https://golang.org/src/container/list/list.go), with the following modifications: +* it uses Go generics +* it allows passing in a `sync.Pool` (via the `NewWithPool` constructor) to reduce allocations of `Element` structs diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist/linkedlist.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist/linkedlist.go index 217b21efb..804a34444 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist/linkedlist.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist/linkedlist.go @@ -11,6 +11,12 @@ // } package list +import "sync" + +func NewPool[T any]() *sync.Pool { + return &sync.Pool{New: func() any { return &Element[T]{} }} +} + // Element is an element of a linked list. type Element[T any] struct { // Next and previous pointers in the doubly-linked list of elements. @@ -43,11 +49,17 @@ func (e *Element[T]) Prev() *Element[T] { return nil } +func (e *Element[T]) List() *List[T] { + return e.list +} + // List represents a doubly linked list. // The zero value for List is an empty list ready to use. type List[T any] struct { root Element[T] // sentinel list element, only &root, root.prev, and root.next are used len int // current list length excluding (this) sentinel element + + pool *sync.Pool } // Init initializes or clears list l. @@ -61,6 +73,12 @@ func (l *List[T]) Init() *List[T] { // New returns an initialized list. 
func New[T any]() *List[T] { return new(List[T]).Init() } +// NewWithPool returns an initialized list, using a sync.Pool for list elements. +func NewWithPool[T any](pool *sync.Pool) *List[T] { + l := &List[T]{pool: pool} + return l.Init() +} + // Len returns the number of elements of list l. // The complexity is O(1). func (l *List[T]) Len() int { return l.len } @@ -101,7 +119,14 @@ func (l *List[T]) insert(e, at *Element[T]) *Element[T] { // insertValue is a convenience wrapper for insert(&Element{Value: v}, at). func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] { - return l.insert(&Element[T]{Value: v}, at) + var e *Element[T] + if l.pool != nil { + e = l.pool.Get().(*Element[T]) + } else { + e = &Element[T]{} + } + e.Value = v + return l.insert(e, at) } // remove removes e from its list, decrements l.len @@ -111,6 +136,9 @@ func (l *List[T]) remove(e *Element[T]) { e.next = nil // avoid memory leaks e.prev = nil // avoid memory leaks e.list = nil + if l.pool != nil { + l.pool.Put(e) + } l.len-- } @@ -132,12 +160,13 @@ func (l *List[T]) move(e, at *Element[T]) { // It returns the element value e.Value. // The element must not be nil. func (l *List[T]) Remove(e *Element[T]) T { + v := e.Value if e.list == l { // if e.list == l, l must have been initialized when e was inserted // in l or l == nil (e is a zero Element) and l.remove will crash l.remove(e) } - return e.Value + return v } // PushFront inserts a new element e with value v at the front of list l and returns e. diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/log.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/log.go index 5707fab93..89b52c0d9 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/log.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/utils/log.go @@ -125,7 +125,7 @@ func readLoggingEnv() LogLevel { case "error": return LogLevelError default: - fmt.Fprintln(os.Stderr, "invalid quic-go log level, see https://github.com/Psiphon-Labs/quic-go/wiki/Logging") + fmt.Fprintln(os.Stderr, "invalid quic-go log level, see https://github.com/quic-go/quic-go/wiki/Logging") return LogLevelNothing } } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/extended_header.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/extended_header.go index f176a06fb..424a14eb9 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/extended_header.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/extended_header.go @@ -2,6 +2,7 @@ package wire import ( "bytes" + "encoding/binary" "errors" "fmt" "io" @@ -42,12 +43,7 @@ func (h *ExtendedHeader) parse(b *bytes.Reader, v protocol.VersionNumber) (bool if _, err := b.Seek(int64(h.Header.ParsedLen())-1, io.SeekCurrent); err != nil { return false, err } - var reservedBitsValid bool - if h.IsLongHeader { - reservedBitsValid, err = h.parseLongHeader(b, v) - } else { - reservedBitsValid, err = h.parseShortHeader(b, v) - } + reservedBitsValid, err := h.parseLongHeader(b, v) if err != nil { return false, err } @@ -65,21 +61,6 @@ func (h *ExtendedHeader) parseLongHeader(b *bytes.Reader, _ protocol.VersionNumb return true, nil } -func (h *ExtendedHeader) parseShortHeader(b *bytes.Reader, _ protocol.VersionNumber) (bool /* reserved bits valid */, error) { - h.KeyPhase = protocol.KeyPhaseZero - if h.typeByte&0x4 > 0 { - h.KeyPhase = protocol.KeyPhaseOne - } - - if err := h.readPacketNumber(b); err != nil { - return false, err - } - if h.typeByte&0x18 != 0 { - return false, nil - } - return true, nil -} - 
func (h *ExtendedHeader) readPacketNumber(b *bytes.Reader) error { h.PacketNumberLen = protocol.PacketNumberLen(h.typeByte&0x3) + 1 switch h.PacketNumberLen { @@ -113,23 +94,17 @@ func (h *ExtendedHeader) readPacketNumber(b *bytes.Reader) error { return nil } -// Write writes the Header. -func (h *ExtendedHeader) Write(b *bytes.Buffer, ver protocol.VersionNumber) error { +// Append appends the Header. +func (h *ExtendedHeader) Append(b []byte, v protocol.VersionNumber) ([]byte, error) { if h.DestConnectionID.Len() > protocol.MaxConnIDLen { - return fmt.Errorf("invalid connection ID length: %d bytes", h.DestConnectionID.Len()) + return nil, fmt.Errorf("invalid connection ID length: %d bytes", h.DestConnectionID.Len()) } if h.SrcConnectionID.Len() > protocol.MaxConnIDLen { - return fmt.Errorf("invalid connection ID length: %d bytes", h.SrcConnectionID.Len()) - } - if h.IsLongHeader { - return h.writeLongHeader(b, ver) + return nil, fmt.Errorf("invalid connection ID length: %d bytes", h.SrcConnectionID.Len()) } - return h.writeShortHeader(b, ver) -} -func (h *ExtendedHeader) writeLongHeader(b *bytes.Buffer, version protocol.VersionNumber) error { var packetType uint8 - if version == protocol.Version2 { + if v == protocol.Version2 { //nolint:exhaustive switch h.Type { case protocol.PacketTypeInitial: @@ -160,51 +135,25 @@ func (h *ExtendedHeader) writeLongHeader(b *bytes.Buffer, version protocol.Versi firstByte |= uint8(h.PacketNumberLen - 1) } - b.WriteByte(firstByte) - utils.BigEndian.WriteUint32(b, uint32(h.Version)) - b.WriteByte(uint8(h.DestConnectionID.Len())) - b.Write(h.DestConnectionID.Bytes()) - b.WriteByte(uint8(h.SrcConnectionID.Len())) - b.Write(h.SrcConnectionID.Bytes()) + b = append(b, firstByte) + b = append(b, make([]byte, 4)...) + binary.BigEndian.PutUint32(b[len(b)-4:], uint32(h.Version)) + b = append(b, uint8(h.DestConnectionID.Len())) + b = append(b, h.DestConnectionID.Bytes()...) + b = append(b, uint8(h.SrcConnectionID.Len())) + b = append(b, h.SrcConnectionID.Bytes()...) //nolint:exhaustive switch h.Type { case protocol.PacketTypeRetry: - b.Write(h.Token) - return nil + b = append(b, h.Token...) + return b, nil case protocol.PacketTypeInitial: - quicvarint.Write(b, uint64(len(h.Token))) - b.Write(h.Token) - } - quicvarint.WriteWithLen(b, uint64(h.Length), 2) - return h.writePacketNumber(b) -} - -func (h *ExtendedHeader) writeShortHeader(b *bytes.Buffer, _ protocol.VersionNumber) error { - typeByte := 0x40 | uint8(h.PacketNumberLen-1) - if h.KeyPhase == protocol.KeyPhaseOne { - typeByte |= byte(1 << 2) + b = quicvarint.Append(b, uint64(len(h.Token))) + b = append(b, h.Token...) 
} - - b.WriteByte(typeByte) - b.Write(h.DestConnectionID.Bytes()) - return h.writePacketNumber(b) -} - -func (h *ExtendedHeader) writePacketNumber(b *bytes.Buffer) error { - switch h.PacketNumberLen { - case protocol.PacketNumberLen1: - b.WriteByte(uint8(h.PacketNumber)) - case protocol.PacketNumberLen2: - utils.BigEndian.WriteUint16(b, uint16(h.PacketNumber)) - case protocol.PacketNumberLen3: - utils.BigEndian.WriteUint24(b, uint32(h.PacketNumber)) - case protocol.PacketNumberLen4: - utils.BigEndian.WriteUint32(b, uint32(h.PacketNumber)) - default: - return fmt.Errorf("invalid packet number length: %d", h.PacketNumberLen) - } - return nil + b = quicvarint.AppendWithLen(b, uint64(h.Length), 2) + return appendPacketNumber(b, h.PacketNumber, h.PacketNumberLen) } // ParsedLen returns the number of bytes that were consumed when parsing the header @@ -213,37 +162,49 @@ func (h *ExtendedHeader) ParsedLen() protocol.ByteCount { } // GetLength determines the length of the Header. -func (h *ExtendedHeader) GetLength(v protocol.VersionNumber) protocol.ByteCount { - if h.IsLongHeader { - length := 1 /* type byte */ + 4 /* version */ + 1 /* dest conn ID len */ + protocol.ByteCount(h.DestConnectionID.Len()) + 1 /* src conn ID len */ + protocol.ByteCount(h.SrcConnectionID.Len()) + protocol.ByteCount(h.PacketNumberLen) + 2 /* length */ - if h.Type == protocol.PacketTypeInitial { - length += quicvarint.Len(uint64(len(h.Token))) + protocol.ByteCount(len(h.Token)) - } - return length +func (h *ExtendedHeader) GetLength(_ protocol.VersionNumber) protocol.ByteCount { + length := 1 /* type byte */ + 4 /* version */ + 1 /* dest conn ID len */ + protocol.ByteCount(h.DestConnectionID.Len()) + 1 /* src conn ID len */ + protocol.ByteCount(h.SrcConnectionID.Len()) + protocol.ByteCount(h.PacketNumberLen) + 2 /* length */ + if h.Type == protocol.PacketTypeInitial { + length += quicvarint.Len(uint64(len(h.Token))) + protocol.ByteCount(len(h.Token)) } - - length := protocol.ByteCount(1 /* type byte */ + h.DestConnectionID.Len()) - length += protocol.ByteCount(h.PacketNumberLen) return length } // Log logs the Header func (h *ExtendedHeader) Log(logger utils.Logger) { - if h.IsLongHeader { - var token string - if h.Type == protocol.PacketTypeInitial || h.Type == protocol.PacketTypeRetry { - if len(h.Token) == 0 { - token = "Token: (empty), " - } else { - token = fmt.Sprintf("Token: %#x, ", h.Token) - } - if h.Type == protocol.PacketTypeRetry { - logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sVersion: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.Version) - return - } + var token string + if h.Type == protocol.PacketTypeInitial || h.Type == protocol.PacketTypeRetry { + if len(h.Token) == 0 { + token = "Token: (empty), " + } else { + token = fmt.Sprintf("Token: %#x, ", h.Token) } - logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sPacketNumber: %d, PacketNumberLen: %d, Length: %d, Version: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.PacketNumber, h.PacketNumberLen, h.Length, h.Version) - } else { - logger.Debugf("\tShort Header{DestConnectionID: %s, PacketNumber: %d, PacketNumberLen: %d, KeyPhase: %s}", h.DestConnectionID, h.PacketNumber, h.PacketNumberLen, h.KeyPhase) + if h.Type == protocol.PacketTypeRetry { + logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sVersion: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.Version) + return + } + } + logger.Debugf("\tLong 
Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sPacketNumber: %d, PacketNumberLen: %d, Length: %d, Version: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.PacketNumber, h.PacketNumberLen, h.Length, h.Version) +} + +func appendPacketNumber(b []byte, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen) ([]byte, error) { + switch pnLen { + case protocol.PacketNumberLen1: + b = append(b, uint8(pn)) + case protocol.PacketNumberLen2: + buf := make([]byte, 2) + binary.BigEndian.PutUint16(buf, uint16(pn)) + b = append(b, buf...) + case protocol.PacketNumberLen3: + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(pn)) + b = append(b, buf[1:]...) + case protocol.PacketNumberLen4: + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(pn)) + b = append(b, buf...) + default: + return nil, fmt.Errorf("invalid packet number length: %d", pnLen) } + return b, nil } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/frame_parser.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/frame_parser.go index 3bebb7249..a079b681b 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/frame_parser.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/frame_parser.go @@ -16,31 +16,30 @@ type frameParser struct { ackDelayExponent uint8 supportsDatagrams bool - - version protocol.VersionNumber } +var _ FrameParser = &frameParser{} + // NewFrameParser creates a new frame parser. -func NewFrameParser(supportsDatagrams bool, v protocol.VersionNumber) FrameParser { +func NewFrameParser(supportsDatagrams bool) *frameParser { return &frameParser{ r: *bytes.NewReader(nil), supportsDatagrams: supportsDatagrams, - version: v, } } // ParseNext parses the next frame. // It skips PADDING frames. 
-func (p *frameParser) ParseNext(data []byte, encLevel protocol.EncryptionLevel) (int, Frame, error) { +func (p *frameParser) ParseNext(data []byte, encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (int, Frame, error) { startLen := len(data) p.r.Reset(data) - frame, err := p.parseNext(&p.r, encLevel) + frame, err := p.parseNext(&p.r, encLevel, v) n := startLen - p.r.Len() p.r.Reset(nil) return n, frame, err } -func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLevel) (Frame, error) { +func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (Frame, error) { for r.Len() != 0 { typeByte, _ := p.r.ReadByte() if typeByte == 0x0 { // PADDING frame @@ -48,7 +47,7 @@ func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLev } r.UnreadByte() - f, err := p.parseFrame(r, typeByte, encLevel) + f, err := p.parseFrame(r, typeByte, encLevel, v) if err != nil { return nil, &qerr.TransportError{ FrameType: uint64(typeByte), @@ -61,56 +60,56 @@ func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLev return nil, nil } -func (p *frameParser) parseFrame(r *bytes.Reader, typeByte byte, encLevel protocol.EncryptionLevel) (Frame, error) { +func (p *frameParser) parseFrame(r *bytes.Reader, typeByte byte, encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (Frame, error) { var frame Frame var err error if typeByte&0xf8 == 0x8 { - frame, err = parseStreamFrame(r, p.version) + frame, err = parseStreamFrame(r, v) } else { switch typeByte { case 0x1: - frame, err = parsePingFrame(r, p.version) + frame, err = parsePingFrame(r, v) case 0x2, 0x3: ackDelayExponent := p.ackDelayExponent if encLevel != protocol.Encryption1RTT { ackDelayExponent = protocol.DefaultAckDelayExponent } - frame, err = parseAckFrame(r, ackDelayExponent, p.version) + frame, err = parseAckFrame(r, ackDelayExponent, v) case 0x4: - frame, err = parseResetStreamFrame(r, p.version) + frame, err = parseResetStreamFrame(r, v) case 0x5: - frame, err = parseStopSendingFrame(r, p.version) + frame, err = parseStopSendingFrame(r, v) case 0x6: - frame, err = parseCryptoFrame(r, p.version) + frame, err = parseCryptoFrame(r, v) case 0x7: - frame, err = parseNewTokenFrame(r, p.version) + frame, err = parseNewTokenFrame(r, v) case 0x10: - frame, err = parseMaxDataFrame(r, p.version) + frame, err = parseMaxDataFrame(r, v) case 0x11: - frame, err = parseMaxStreamDataFrame(r, p.version) + frame, err = parseMaxStreamDataFrame(r, v) case 0x12, 0x13: - frame, err = parseMaxStreamsFrame(r, p.version) + frame, err = parseMaxStreamsFrame(r, v) case 0x14: - frame, err = parseDataBlockedFrame(r, p.version) + frame, err = parseDataBlockedFrame(r, v) case 0x15: - frame, err = parseStreamDataBlockedFrame(r, p.version) + frame, err = parseStreamDataBlockedFrame(r, v) case 0x16, 0x17: - frame, err = parseStreamsBlockedFrame(r, p.version) + frame, err = parseStreamsBlockedFrame(r, v) case 0x18: - frame, err = parseNewConnectionIDFrame(r, p.version) + frame, err = parseNewConnectionIDFrame(r, v) case 0x19: - frame, err = parseRetireConnectionIDFrame(r, p.version) + frame, err = parseRetireConnectionIDFrame(r, v) case 0x1a: - frame, err = parsePathChallengeFrame(r, p.version) + frame, err = parsePathChallengeFrame(r, v) case 0x1b: - frame, err = parsePathResponseFrame(r, p.version) + frame, err = parsePathResponseFrame(r, v) case 0x1c, 0x1d: - frame, err = parseConnectionCloseFrame(r, p.version) + frame, err = parseConnectionCloseFrame(r, 
v) case 0x1e: - frame, err = parseHandshakeDoneFrame(r, p.version) + frame, err = parseHandshakeDoneFrame(r, v) case 0x30, 0x31: if p.supportsDatagrams { - frame, err = parseDatagramFrame(r, p.version) + frame, err = parseDatagramFrame(r, v) break } fallthrough diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/header.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/header.go index 07012ade6..a2b34ab54 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/header.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/header.go @@ -121,9 +121,8 @@ var ErrUnsupportedVersion = errors.New("unsupported version") // The Header is the version independent part of the header type Header struct { - IsLongHeader bool - typeByte byte - Type protocol.PacketType + typeByte byte + Type protocol.PacketType Version protocol.VersionNumber SrcConnectionID protocol.ConnectionID @@ -140,24 +139,22 @@ type Header struct { // If the packet has a long header, the packet is cut according to the length field. // If we understand the version, the packet is header up unto the packet number. // Otherwise, only the invariant part of the header is parsed. -func ParsePacket(data []byte, shortHeaderConnIDLen int) (*Header, []byte /* packet data */, []byte /* rest */, error) { - hdr, err := parseHeader(bytes.NewReader(data), shortHeaderConnIDLen) +func ParsePacket(data []byte) (*Header, []byte, []byte, error) { + if len(data) == 0 || !IsLongHeaderPacket(data[0]) { + return nil, nil, nil, errors.New("not a long header packet") + } + hdr, err := parseHeader(bytes.NewReader(data)) if err != nil { if err == ErrUnsupportedVersion { return hdr, nil, nil, ErrUnsupportedVersion } return nil, nil, nil, err } - var rest []byte - if hdr.IsLongHeader { - if protocol.ByteCount(len(data)) < hdr.ParsedLen()+hdr.Length { - return nil, nil, nil, fmt.Errorf("packet length (%d bytes) is smaller than the expected length (%d bytes)", len(data)-int(hdr.ParsedLen()), hdr.Length) - } - packetLen := int(hdr.ParsedLen() + hdr.Length) - rest = data[packetLen:] - data = data[:packetLen] + if protocol.ByteCount(len(data)) < hdr.ParsedLen()+hdr.Length { + return nil, nil, nil, fmt.Errorf("packet length (%d bytes) is smaller than the expected length (%d bytes)", len(data)-int(hdr.ParsedLen()), hdr.Length) } - return hdr, data, rest, nil + packetLen := int(hdr.ParsedLen() + hdr.Length) + return hdr, data[:packetLen], data[packetLen:], nil } // ParseHeader parses the header. 
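// A minimal, self-contained sketch of the long/short header split applied
// above: wire.ParsePacket now handles only long header packets, and 1-RTT
// (short header) packets are parsed via wire.ParseShortHeader (see
// short_header.go later in this patch). The dispatch is driven by the QUIC
// header form bit (0x80) of the first byte, which is what IsLongHeaderPacket
// tests.
package main

import "fmt"

// isLongHeader mirrors the first-byte check behind IsLongHeaderPacket.
func isLongHeader(firstByte byte) bool { return firstByte&0x80 > 0 }

func classify(pkt []byte) string {
	if len(pkt) == 0 {
		return "empty"
	}
	if isLongHeader(pkt[0]) {
		return "long header (Initial, 0-RTT, Handshake, Retry)"
	}
	return "short header (1-RTT)"
}

func main() {
	fmt.Println(classify([]byte{0xc3})) // form bit set: long header
	fmt.Println(classify([]byte{0x43})) // fixed bit only: 1-RTT short header
}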
@@ -165,43 +162,17 @@ func ParsePacket(data []byte, shortHeaderConnIDLen int) (*Header, []byte /* pack // For long header packets: // * if we understand the version: up to the packet number // * if not, only the invariant part of the header -func parseHeader(b *bytes.Reader, shortHeaderConnIDLen int) (*Header, error) { +func parseHeader(b *bytes.Reader) (*Header, error) { startLen := b.Len() - h, err := parseHeaderImpl(b, shortHeaderConnIDLen) - if err != nil { - return h, err - } - h.parsedLen = protocol.ByteCount(startLen - b.Len()) - return h, err -} - -func parseHeaderImpl(b *bytes.Reader, shortHeaderConnIDLen int) (*Header, error) { typeByte, err := b.ReadByte() if err != nil { return nil, err } - h := &Header{ - typeByte: typeByte, - IsLongHeader: IsLongHeaderPacket(typeByte), - } - - if !h.IsLongHeader { - if h.typeByte&0x40 == 0 { - return nil, errors.New("not a QUIC packet") - } - if err := h.parseShortHeader(b, shortHeaderConnIDLen); err != nil { - return nil, err - } - return h, nil - } - return h, h.parseLongHeader(b) -} - -func (h *Header) parseShortHeader(b *bytes.Reader, shortHeaderConnIDLen int) error { - var err error - h.DestConnectionID, err = protocol.ReadConnectionID(b, shortHeaderConnIDLen) - return err + h := &Header{typeByte: typeByte} + err = h.parseLongHeader(b) + h.parsedLen = protocol.ByteCount(startLen - b.Len()) + return h, err } func (h *Header) parseLongHeader(b *bytes.Reader) error { @@ -321,8 +292,5 @@ func (h *Header) toExtendedHeader() *ExtendedHeader { // PacketType is the type of the packet, for logging purposes func (h *Header) PacketType() string { - if h.IsLongHeader { - return h.Type.String() - } - return "1-RTT" + return h.Type.String() } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/interface.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/interface.go index c9c27744f..ecf6de5ac 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/interface.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/interface.go @@ -12,6 +12,6 @@ type Frame interface { // A FrameParser parses QUIC frames, one by one. type FrameParser interface { - ParseNext([]byte, protocol.EncryptionLevel) (int, Frame, error) + ParseNext([]byte, protocol.EncryptionLevel, protocol.VersionNumber) (int, Frame, error) SetAckDelayExponent(uint8) } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/short_header.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/short_header.go index 9d241e413..56ba9c079 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/short_header.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/short_header.go @@ -9,6 +9,9 @@ import ( "github.com/Psiphon-Labs/quic-go/internal/utils" ) +// ParseShortHeader parses a short header packet. +// It must be called after header protection was removed. +// Otherwise, the check for the reserved bits will (most likely) fail. func ParseShortHeader(data []byte, connIDLen int) (length int, _ protocol.PacketNumber, _ protocol.PacketNumberLen, _ protocol.KeyPhaseBit, _ error) { if len(data) == 0 { return 0, 0, 0, 0, io.EOF @@ -50,6 +53,21 @@ func ParseShortHeader(data []byte, connIDLen int) (length int, _ protocol.Packet return 1 + connIDLen + int(pnLen), pn, pnLen, kp, err } +// AppendShortHeader writes a short header. 
+func AppendShortHeader(b []byte, connID protocol.ConnectionID, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen, kp protocol.KeyPhaseBit) ([]byte, error) { + typeByte := 0x40 | uint8(pnLen-1) + if kp == protocol.KeyPhaseOne { + typeByte |= byte(1 << 2) + } + b = append(b, typeByte) + b = append(b, connID.Bytes()...) + return appendPacketNumber(b, pn, pnLen) +} + +func ShortHeaderLen(dest protocol.ConnectionID, pnLen protocol.PacketNumberLen) protocol.ByteCount { + return 1 + protocol.ByteCount(dest.Len()) + protocol.ByteCount(pnLen) +} + func LogShortHeader(logger utils.Logger, dest protocol.ConnectionID, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen, kp protocol.KeyPhaseBit) { logger.Debugf("\tShort Header{DestConnectionID: %s, PacketNumber: %d, PacketNumberLen: %d, KeyPhase: %s}", dest, pn, pnLen, kp) } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/transport_parameters.go b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/transport_parameters.go index d9ac5d391..e31147993 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/transport_parameters.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/internal/wire/transport_parameters.go @@ -304,6 +304,9 @@ func (p *TransportParameters) readNumericTransportParameter( } p.MaxAckDelay = time.Duration(val) * time.Millisecond case activeConnectionIDLimitParameterID: + if val < 2 { + return fmt.Errorf("invalid value for active_connection_id_limit: %d (minimum 2)", val) + } p.ActiveConnectionIDLimit = val case maxDatagramFrameSizeParameterID: p.MaxDatagramFrameSize = protocol.ByteCount(val) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/logging/interface.go b/vendor/github.com/Psiphon-Labs/quic-go/logging/interface.go index e2ad1592f..fbde48b88 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/logging/interface.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/logging/interface.go @@ -7,10 +7,9 @@ import ( "net" "time" - "github.com/Psiphon-Labs/quic-go/internal/utils" - "github.com/Psiphon-Labs/quic-go/internal/protocol" "github.com/Psiphon-Labs/quic-go/internal/qerr" + "github.com/Psiphon-Labs/quic-go/internal/utils" "github.com/Psiphon-Labs/quic-go/internal/wire" ) @@ -121,7 +120,8 @@ type ConnectionTracer interface { SentTransportParameters(*TransportParameters) ReceivedTransportParameters(*TransportParameters) RestoredTransportParameters(parameters *TransportParameters) // for 0-RTT - SentPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) + SentLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) + SentShortHeaderPacket(hdr *ShortHeader, size ByteCount, ack *AckFrame, frames []Frame) ReceivedVersionNegotiationPacket(dest, src ArbitraryLenConnectionID, _ []VersionNumber) ReceivedRetry(*Header) ReceivedLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame) diff --git a/vendor/github.com/Psiphon-Labs/quic-go/logging/multiplex.go b/vendor/github.com/Psiphon-Labs/quic-go/logging/multiplex.go index d7166f1aa..8e85db494 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/logging/multiplex.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/logging/multiplex.go @@ -104,9 +104,15 @@ func (m *connTracerMultiplexer) RestoredTransportParameters(tp *TransportParamet } } -func (m *connTracerMultiplexer) SentPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) { +func (m *connTracerMultiplexer) SentLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) { for _, t := range m.tracers { 
- t.SentPacket(hdr, size, ack, frames) + t.SentLongHeaderPacket(hdr, size, ack, frames) + } +} + +func (m *connTracerMultiplexer) SentShortHeaderPacket(hdr *ShortHeader, size ByteCount, ack *AckFrame, frames []Frame) { + for _, t := range m.tracers { + t.SentShortHeaderPacket(hdr, size, ack, frames) } } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/logging/null_tracer.go b/vendor/github.com/Psiphon-Labs/quic-go/logging/null_tracer.go index 3103ae90a..38052ae3b 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/logging/null_tracer.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/logging/null_tracer.go @@ -31,11 +31,12 @@ func (n NullConnectionTracer) StartedConnection(local, remote net.Addr, srcConnI func (n NullConnectionTracer) NegotiatedVersion(chosen VersionNumber, clientVersions, serverVersions []VersionNumber) { } -func (n NullConnectionTracer) ClosedConnection(err error) {} -func (n NullConnectionTracer) SentTransportParameters(*TransportParameters) {} -func (n NullConnectionTracer) ReceivedTransportParameters(*TransportParameters) {} -func (n NullConnectionTracer) RestoredTransportParameters(*TransportParameters) {} -func (n NullConnectionTracer) SentPacket(*ExtendedHeader, ByteCount, *AckFrame, []Frame) {} +func (n NullConnectionTracer) ClosedConnection(err error) {} +func (n NullConnectionTracer) SentTransportParameters(*TransportParameters) {} +func (n NullConnectionTracer) ReceivedTransportParameters(*TransportParameters) {} +func (n NullConnectionTracer) RestoredTransportParameters(*TransportParameters) {} +func (n NullConnectionTracer) SentLongHeaderPacket(*ExtendedHeader, ByteCount, *AckFrame, []Frame) {} +func (n NullConnectionTracer) SentShortHeaderPacket(*ShortHeader, ByteCount, *AckFrame, []Frame) {} func (n NullConnectionTracer) ReceivedVersionNegotiationPacket(dest, src ArbitraryLenConnectionID, _ []VersionNumber) { } func (n NullConnectionTracer) ReceivedRetry(*Header) {} diff --git a/vendor/github.com/Psiphon-Labs/quic-go/logging/packet_header.go b/vendor/github.com/Psiphon-Labs/quic-go/logging/packet_header.go index 9ba5b00dc..a7e66004f 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/logging/packet_header.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/logging/packet_header.go @@ -6,9 +6,6 @@ import ( // PacketTypeFromHeader determines the packet type from a *wire.Header. func PacketTypeFromHeader(hdr *Header) PacketType { - if !hdr.IsLongHeader { - return PacketType1RTT - } if hdr.Version == 0 { return PacketTypeVersionNegotiation } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/packet_handler_map.go b/vendor/github.com/Psiphon-Labs/quic-go/packet_handler_map.go index dec95fa8f..ab6088de8 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/packet_handler_map.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/packet_handler_map.go @@ -117,7 +117,7 @@ func newPacketHandlerMap( // [Psiphon] // Do not emit alert to stderr (was log.Printf). - logger.Errorf("%s. See https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size for details.", err) + logger.Errorf("%s. See https://github.com/quic-go/quic-go/wiki/UDP-Receive-Buffer-Size for details.", err) }) } } @@ -367,7 +367,7 @@ func (h *packetHandlerMap) listen() { //nolint:staticcheck // SA1019 ignore this! // TODO: This code is used to ignore wsa errors on Windows. // Since net.Error.Temporary is deprecated as of Go 1.18, we should find a better solution. - // See https://github.com/lucas-clemente/quic-go/issues/1737 for details. + // See https://github.com/quic-go/quic-go/issues/1737 for details. 
if nerr, ok := err.(net.Error); ok && nerr.Temporary() { h.logger.Debugf("Temporary error reading from conn: %w", err) continue diff --git a/vendor/github.com/Psiphon-Labs/quic-go/packet_packer.go b/vendor/github.com/Psiphon-Labs/quic-go/packet_packer.go index 0d24c0a11..6d1d1f3e9 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/packet_packer.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/packet_packer.go @@ -1,7 +1,6 @@ package quic import ( - "bytes" "errors" "fmt" "net" @@ -15,15 +14,17 @@ import ( "github.com/Psiphon-Labs/quic-go/internal/wire" ) +var errNothingToPack = errors.New("nothing to pack") + type packer interface { - PackCoalescedPacket(onlyAck bool) (*coalescedPacket, error) - PackPacket(onlyAck bool) (*packedPacket, error) - MaybePackProbePacket(protocol.EncryptionLevel) (*packedPacket, error) - PackConnectionClose(*qerr.TransportError) (*coalescedPacket, error) - PackApplicationClose(*qerr.ApplicationError) (*coalescedPacket, error) + PackCoalescedPacket(onlyAck bool, v protocol.VersionNumber) (*coalescedPacket, error) + PackPacket(onlyAck bool, now time.Time, v protocol.VersionNumber) (shortHeaderPacket, *packetBuffer, error) + MaybePackProbePacket(protocol.EncryptionLevel, protocol.VersionNumber) (*coalescedPacket, error) + PackConnectionClose(*qerr.TransportError, protocol.VersionNumber) (*coalescedPacket, error) + PackApplicationClose(*qerr.ApplicationError, protocol.VersionNumber) (*coalescedPacket, error) SetMaxPacketSize(protocol.ByteCount) - PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount) (*packedPacket, error) + PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount, now time.Time, v protocol.VersionNumber) (shortHeaderPacket, *packetBuffer, error) HandleTransportParameters(*wire.TransportParameters) SetToken([]byte) @@ -34,35 +35,39 @@ type sealer interface { } type payload struct { - frames []ackhandler.Frame + frames []*ackhandler.Frame ack *wire.AckFrame length protocol.ByteCount } -type packedPacket struct { - buffer *packetBuffer - *packetContents -} - -type packetContents struct { +type longHeaderPacket struct { header *wire.ExtendedHeader ack *wire.AckFrame - frames []ackhandler.Frame + frames []*ackhandler.Frame length protocol.ByteCount isMTUProbePacket bool } +type shortHeaderPacket struct { + *ackhandler.Packet + // used for logging + DestConnID protocol.ConnectionID + Ack *wire.AckFrame + PacketNumberLen protocol.PacketNumberLen + KeyPhase protocol.KeyPhaseBit +} + +func (p *shortHeaderPacket) IsAckEliciting() bool { return ackhandler.HasAckElicitingFrames(p.Frames) } + type coalescedPacket struct { - buffer *packetBuffer - packets []*packetContents + buffer *packetBuffer + longHdrPackets []*longHeaderPacket + shortHdrPacket *shortHeaderPacket } -func (p *packetContents) EncryptionLevel() protocol.EncryptionLevel { - if !p.header.IsLongHeader { - return protocol.Encryption1RTT - } +func (p *longHeaderPacket) EncryptionLevel() protocol.EncryptionLevel { //nolint:exhaustive // Will never be called for Retry packets (and they don't have encrypted data). 
switch p.header.Type { case protocol.PacketTypeInitial: @@ -76,11 +81,9 @@ func (p *packetContents) EncryptionLevel() protocol.EncryptionLevel { } } -func (p *packetContents) IsAckEliciting() bool { - return ackhandler.HasAckElicitingFrames(p.frames) -} +func (p *longHeaderPacket) IsAckEliciting() bool { return ackhandler.HasAckElicitingFrames(p.frames) } -func (p *packetContents) ToAckHandlerPacket(now time.Time, q *retransmissionQueue) *ackhandler.Packet { +func (p *longHeaderPacket) ToAckHandlerPacket(now time.Time, q *retransmissionQueue) *ackhandler.Packet { largestAcked := protocol.InvalidPacketNumber if p.ack != nil { largestAcked = p.ack.LargestAcked() @@ -90,12 +93,13 @@ func (p *packetContents) ToAckHandlerPacket(now time.Time, q *retransmissionQueu if p.frames[i].OnLost != nil { continue } + //nolint:exhaustive // Short header packets are handled separately. switch encLevel { case protocol.EncryptionInitial: p.frames[i].OnLost = q.AddInitial case protocol.EncryptionHandshake: p.frames[i].OnLost = q.AddHandshake - case protocol.Encryption0RTT, protocol.Encryption1RTT: + case protocol.Encryption0RTT: p.frames[i].OnLost = q.AddAppData } } @@ -155,8 +159,8 @@ type sealingManager interface { type frameSource interface { HasData() bool - AppendStreamFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) - AppendControlFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) + AppendStreamFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) + AppendControlFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) } type ackFrameSource interface { @@ -168,7 +172,6 @@ type packetPacker struct { getDestConnID func() protocol.ConnectionID perspective protocol.Perspective - version protocol.VersionNumber cryptoSetup sealingManager initialStream cryptoStream @@ -205,7 +208,6 @@ func newPacketPacker( acks ackFrameSource, datagramQueue *datagramQueue, perspective protocol.Perspective, - version protocol.VersionNumber, ) *packetPacker { return &packetPacker{ cryptoSetup: cryptoSetup, @@ -216,7 +218,6 @@ func newPacketPacker( retransmissionQueue: retransmissionQueue, datagramQueue: datagramQueue, perspective: perspective, - version: version, framer: framer, acks: acks, pnManager: packetNumberManager, @@ -225,18 +226,18 @@ func newPacketPacker( } // PackConnectionClose packs a packet that closes the connection with a transport error. -func (p *packetPacker) PackConnectionClose(e *qerr.TransportError) (*coalescedPacket, error) { +func (p *packetPacker) PackConnectionClose(e *qerr.TransportError, v protocol.VersionNumber) (*coalescedPacket, error) { var reason string // don't send details of crypto errors if !e.ErrorCode.IsCryptoError() { reason = e.ErrorMessage } - return p.packConnectionClose(false, uint64(e.ErrorCode), e.FrameType, reason) + return p.packConnectionClose(false, uint64(e.ErrorCode), e.FrameType, reason, v) } // PackApplicationClose packs a packet that closes the connection with an application error. 
-func (p *packetPacker) PackApplicationClose(e *qerr.ApplicationError) (*coalescedPacket, error) { - return p.packConnectionClose(true, uint64(e.ErrorCode), 0, e.ErrorMessage) +func (p *packetPacker) PackApplicationClose(e *qerr.ApplicationError, v protocol.VersionNumber) (*coalescedPacket, error) { + return p.packConnectionClose(true, uint64(e.ErrorCode), 0, e.ErrorMessage, v) } func (p *packetPacker) packConnectionClose( @@ -244,12 +245,17 @@ func (p *packetPacker) packConnectionClose( errorCode uint64, frameType uint64, reason string, + v protocol.VersionNumber, ) (*coalescedPacket, error) { var sealers [4]sealer - var hdrs [4]*wire.ExtendedHeader - var payloads [4]*payload + var hdrs [3]*wire.ExtendedHeader + var payloads [4]payload var size protocol.ByteCount - var numPackets uint8 + var connID protocol.ConnectionID + var oneRTTPacketNumber protocol.PacketNumber + var oneRTTPacketNumberLen protocol.PacketNumberLen + var keyPhase protocol.KeyPhaseBit // only set for 1-RTT + var numLongHdrPackets uint8 encLevels := [4]protocol.EncryptionLevel{protocol.EncryptionInitial, protocol.EncryptionHandshake, protocol.Encryption0RTT, protocol.Encryption1RTT} for i, encLevel := range encLevels { if p.perspective == protocol.PerspectiveServer && encLevel == protocol.Encryption0RTT { @@ -267,14 +273,13 @@ func (p *packetPacker) packConnectionClose( ccf.ErrorCode = uint64(qerr.ApplicationErrorErrorCode) ccf.ReasonPhrase = "" } - payload := &payload{ - frames: []ackhandler.Frame{{Frame: ccf}}, - length: ccf.Length(p.version), + pl := payload{ + frames: []*ackhandler.Frame{{Frame: ccf}}, + length: ccf.Length(v), } var sealer sealer var err error - var keyPhase protocol.KeyPhaseBit // only set for 1-RTT switch encLevel { case protocol.EncryptionInitial: sealer, err = p.cryptoSetup.GetInitialSealer() @@ -299,17 +304,22 @@ func (p *packetPacker) packConnectionClose( sealers[i] = sealer var hdr *wire.ExtendedHeader if encLevel == protocol.Encryption1RTT { - hdr = p.getShortHeader(keyPhase) + connID = p.getDestConnID() + oneRTTPacketNumber, oneRTTPacketNumberLen = p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + size += p.shortHeaderPacketLength(connID, oneRTTPacketNumberLen, pl) } else { - hdr = p.getLongHeader(encLevel) + hdr = p.getLongHeader(encLevel, v) + hdrs[i] = hdr + size += p.longHeaderPacketLength(hdr, pl, v) + protocol.ByteCount(sealer.Overhead()) + numLongHdrPackets++ } - hdrs[i] = hdr - payloads[i] = payload - size += p.packetLength(hdr, payload) + protocol.ByteCount(sealer.Overhead()) - numPackets++ + payloads[i] = pl } - contents := make([]*packetContents, 0, numPackets) buffer := getPacketBuffer() + packet := &coalescedPacket{ + buffer: buffer, + longHdrPackets: make([]*longHeaderPacket, 0, numLongHdrPackets), + } for i, encLevel := range encLevels { if sealers[i] == nil { continue @@ -318,29 +328,54 @@ func (p *packetPacker) packConnectionClose( if encLevel == protocol.EncryptionInitial { paddingLen = p.initialPaddingLen(payloads[i].frames, size) } - c, err := p.appendPacket(buffer, hdrs[i], payloads[i], paddingLen, encLevel, sealers[i], false) - if err != nil { - return nil, err + if encLevel == protocol.Encryption1RTT { + ap, ack, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, keyPhase, payloads[i], paddingLen, sealers[i], false, v) + if err != nil { + return nil, err + } + packet.shortHdrPacket = &shortHeaderPacket{ + Packet: ap, + DestConnID: connID, + Ack: ack, + PacketNumberLen: oneRTTPacketNumberLen, + KeyPhase: keyPhase, + } + } 
else { + longHdrPacket, err := p.appendLongHeaderPacket(buffer, hdrs[i], payloads[i], paddingLen, encLevel, sealers[i], v) + if err != nil { + return nil, err + } + packet.longHdrPackets = append(packet.longHdrPackets, longHdrPacket) } - contents = append(contents, c) } - return &coalescedPacket{buffer: buffer, packets: contents}, nil + return packet, nil } -// packetLength calculates the length of the serialized packet. +// longHeaderPacketLength calculates the length of a serialized long header packet. // It takes into account that packets that have a tiny payload need to be padded, // such that len(payload) + packet number len >= 4 + AEAD overhead -func (p *packetPacker) packetLength(hdr *wire.ExtendedHeader, payload *payload) protocol.ByteCount { +func (p *packetPacker) longHeaderPacketLength(hdr *wire.ExtendedHeader, pl payload, v protocol.VersionNumber) protocol.ByteCount { var paddingLen protocol.ByteCount pnLen := protocol.ByteCount(hdr.PacketNumberLen) - if payload.length < 4-pnLen { - paddingLen = 4 - pnLen - payload.length + if pl.length < 4-pnLen { + paddingLen = 4 - pnLen - pl.length + } + return hdr.GetLength(v) + pl.length + paddingLen +} + +// shortHeaderPacketLength calculates the length of a serialized short header packet. +// It takes into account that packets that have a tiny payload need to be padded, +// such that len(payload) + packet number len >= 4 + AEAD overhead +func (p *packetPacker) shortHeaderPacketLength(connID protocol.ConnectionID, pnLen protocol.PacketNumberLen, pl payload) protocol.ByteCount { + var paddingLen protocol.ByteCount + if pl.length < 4-protocol.ByteCount(pnLen) { + paddingLen = 4 - protocol.ByteCount(pnLen) - pl.length } - return hdr.GetLength(p.version) + payload.length + paddingLen + return wire.ShortHeaderLen(connID, pnLen) + pl.length + paddingLen } // size is the expected size of the packet, if no padding was applied. -func (p *packetPacker) initialPaddingLen(frames []ackhandler.Frame, size protocol.ByteCount) protocol.ByteCount { +func (p *packetPacker) initialPaddingLen(frames []*ackhandler.Frame, size protocol.ByteCount) protocol.ByteCount { // For the server, only ack-eliciting Initial packets need to be padded. if p.perspective == protocol.PerspectiveServer && !ackhandler.HasAckElicitingFrames(frames) { return 0 @@ -354,14 +389,17 @@ func (p *packetPacker) initialPaddingLen(frames []ackhandler.Frame, size protoco // PackCoalescedPacket packs a new packet. // It packs an Initial / Handshake if there is data to send in these packet number spaces. // It should only be called before the handshake is confirmed. -func (p *packetPacker) PackCoalescedPacket(onlyAck bool) (*coalescedPacket, error) { +func (p *packetPacker) PackCoalescedPacket(onlyAck bool, v protocol.VersionNumber) (*coalescedPacket, error) { maxPacketSize := p.maxPacketSize if p.perspective == protocol.PerspectiveClient { maxPacketSize = protocol.MinInitialPacketSize } - var initialHdr, handshakeHdr, appDataHdr *wire.ExtendedHeader - var initialPayload, handshakePayload, appDataPayload *payload - var numPackets int + var ( + initialHdr, handshakeHdr, zeroRTTHdr *wire.ExtendedHeader + initialPayload, handshakePayload, zeroRTTPayload, oneRTTPayload payload + oneRTTPacketNumber protocol.PacketNumber + oneRTTPacketNumberLen protocol.PacketNumberLen + ) // Try packing an Initial packet. 
initialSealer, err := p.cryptoSetup.GetInitialSealer() if err != nil && err != handshake.ErrKeysDropped { @@ -369,10 +407,9 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool) (*coalescedPacket, erro } var size protocol.ByteCount if initialSealer != nil { - initialHdr, initialPayload = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(initialSealer.Overhead()), protocol.EncryptionInitial, onlyAck, true) - if initialPayload != nil { - size += p.packetLength(initialHdr, initialPayload) + protocol.ByteCount(initialSealer.Overhead()) - numPackets++ + initialHdr, initialPayload = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(initialSealer.Overhead()), protocol.EncryptionInitial, onlyAck, true, v) + if initialPayload.length > 0 { + size += p.longHeaderPacketLength(initialHdr, initialPayload, v) + protocol.ByteCount(initialSealer.Overhead()) } } @@ -385,107 +422,133 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool) (*coalescedPacket, erro return nil, err } if handshakeSealer != nil { - handshakeHdr, handshakePayload = p.maybeGetCryptoPacket(maxPacketSize-size-protocol.ByteCount(handshakeSealer.Overhead()), protocol.EncryptionHandshake, onlyAck, size == 0) - if handshakePayload != nil { - s := p.packetLength(handshakeHdr, handshakePayload) + protocol.ByteCount(handshakeSealer.Overhead()) + handshakeHdr, handshakePayload = p.maybeGetCryptoPacket(maxPacketSize-size-protocol.ByteCount(handshakeSealer.Overhead()), protocol.EncryptionHandshake, onlyAck, size == 0, v) + if handshakePayload.length > 0 { + s := p.longHeaderPacketLength(handshakeHdr, handshakePayload, v) + protocol.ByteCount(handshakeSealer.Overhead()) size += s - numPackets++ } } } // Add a 0-RTT / 1-RTT packet. - var appDataSealer sealer - appDataEncLevel := protocol.Encryption1RTT + var zeroRTTSealer sealer + var oneRTTSealer handshake.ShortHeaderSealer + var connID protocol.ConnectionID + var kp protocol.KeyPhaseBit if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { - var sErr error - var oneRTTSealer handshake.ShortHeaderSealer - oneRTTSealer, sErr = p.cryptoSetup.Get1RTTSealer() - appDataSealer = oneRTTSealer - if sErr != nil && p.perspective == protocol.PerspectiveClient { - appDataSealer, sErr = p.cryptoSetup.Get0RTTSealer() - appDataEncLevel = protocol.Encryption0RTT - } - if appDataSealer != nil && sErr == nil { - //nolint:exhaustive // 0-RTT and 1-RTT are the only two application data encryption levels. 
- switch appDataEncLevel { - case protocol.Encryption0RTT: - appDataHdr, appDataPayload = p.maybeGetAppDataPacketFor0RTT(appDataSealer, maxPacketSize-size) - case protocol.Encryption1RTT: - appDataHdr, appDataPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, maxPacketSize-size, onlyAck, size == 0) + var err error + oneRTTSealer, err = p.cryptoSetup.Get1RTTSealer() + if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { + return nil, err + } + if err == nil { // 1-RTT + kp = oneRTTSealer.KeyPhase() + connID = p.getDestConnID() + oneRTTPacketNumber, oneRTTPacketNumberLen = p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + hdrLen := wire.ShortHeaderLen(connID, oneRTTPacketNumberLen) + oneRTTPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, hdrLen, maxPacketSize-size, onlyAck, size == 0, v) + if oneRTTPayload.length > 0 { + size += p.shortHeaderPacketLength(connID, oneRTTPacketNumberLen, oneRTTPayload) + protocol.ByteCount(oneRTTSealer.Overhead()) } - if appDataHdr != nil && appDataPayload != nil { - size += p.packetLength(appDataHdr, appDataPayload) + protocol.ByteCount(appDataSealer.Overhead()) - numPackets++ + } else if p.perspective == protocol.PerspectiveClient { // 0-RTT + var err error + zeroRTTSealer, err = p.cryptoSetup.Get0RTTSealer() + if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { + return nil, err + } + if zeroRTTSealer != nil { + zeroRTTHdr, zeroRTTPayload = p.maybeGetAppDataPacketFor0RTT(zeroRTTSealer, maxPacketSize-size, v) + if zeroRTTPayload.length > 0 { + size += p.longHeaderPacketLength(zeroRTTHdr, zeroRTTPayload, v) + protocol.ByteCount(zeroRTTSealer.Overhead()) + } } } } - if numPackets == 0 { + if initialPayload.length == 0 && handshakePayload.length == 0 && zeroRTTPayload.length == 0 && oneRTTPayload.length == 0 { return nil, nil } buffer := getPacketBuffer() packet := &coalescedPacket{ - buffer: buffer, - packets: make([]*packetContents, 0, numPackets), + buffer: buffer, + longHdrPackets: make([]*longHeaderPacket, 0, 3), } - if initialPayload != nil { + if initialPayload.length > 0 { padding := p.initialPaddingLen(initialPayload.frames, size) - cont, err := p.appendPacket(buffer, initialHdr, initialPayload, padding, protocol.EncryptionInitial, initialSealer, false) + cont, err := p.appendLongHeaderPacket(buffer, initialHdr, initialPayload, padding, protocol.EncryptionInitial, initialSealer, v) if err != nil { return nil, err } - packet.packets = append(packet.packets, cont) + packet.longHdrPackets = append(packet.longHdrPackets, cont) } - if handshakePayload != nil { - cont, err := p.appendPacket(buffer, handshakeHdr, handshakePayload, 0, protocol.EncryptionHandshake, handshakeSealer, false) + if handshakePayload.length > 0 { + cont, err := p.appendLongHeaderPacket(buffer, handshakeHdr, handshakePayload, 0, protocol.EncryptionHandshake, handshakeSealer, v) if err != nil { return nil, err } - packet.packets = append(packet.packets, cont) + packet.longHdrPackets = append(packet.longHdrPackets, cont) } - if appDataPayload != nil { - cont, err := p.appendPacket(buffer, appDataHdr, appDataPayload, 0, appDataEncLevel, appDataSealer, false) + if zeroRTTPayload.length > 0 { + longHdrPacket, err := p.appendLongHeaderPacket(buffer, zeroRTTHdr, zeroRTTPayload, 0, protocol.Encryption0RTT, zeroRTTSealer, v) if err != nil { return nil, err } - packet.packets = append(packet.packets, cont) + packet.longHdrPackets = append(packet.longHdrPackets, longHdrPacket) + } else if 
oneRTTPayload.length > 0 { + ap, ack, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, kp, oneRTTPayload, 0, oneRTTSealer, false, v) + if err != nil { + return nil, err + } + packet.shortHdrPacket = &shortHeaderPacket{ + Packet: ap, + DestConnID: connID, + Ack: ack, + PacketNumberLen: oneRTTPacketNumberLen, + KeyPhase: kp, + } } return packet, nil } // PackPacket packs a packet in the application data packet number space. // It should be called after the handshake is confirmed. -func (p *packetPacker) PackPacket(onlyAck bool) (*packedPacket, error) { +func (p *packetPacker) PackPacket(onlyAck bool, now time.Time, v protocol.VersionNumber) (shortHeaderPacket, *packetBuffer, error) { sealer, err := p.cryptoSetup.Get1RTTSealer() if err != nil { - return nil, err + return shortHeaderPacket{}, nil, err } - hdr, payload := p.maybeGetShortHeaderPacket(sealer, p.maxPacketSize, onlyAck, true) - if payload == nil { - return nil, nil + pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + connID := p.getDestConnID() + hdrLen := wire.ShortHeaderLen(connID, pnLen) + pl := p.maybeGetShortHeaderPacket(sealer, hdrLen, p.maxPacketSize, onlyAck, true, v) + if pl.length == 0 { + return shortHeaderPacket{}, nil, errNothingToPack } + kp := sealer.KeyPhase() buffer := getPacketBuffer() - cont, err := p.appendPacket(buffer, hdr, payload, 0, protocol.Encryption1RTT, sealer, false) + ap, ack, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, 0, sealer, false, v) if err != nil { - return nil, err + return shortHeaderPacket{}, nil, err } - return &packedPacket{ - buffer: buffer, - packetContents: cont, - }, nil + return shortHeaderPacket{ + Packet: ap, + DestConnID: connID, + Ack: ack, + PacketNumberLen: pnLen, + KeyPhase: kp, + }, buffer, nil } -func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, encLevel protocol.EncryptionLevel, onlyAck, ackAllowed bool) (*wire.ExtendedHeader, *payload) { +func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, encLevel protocol.EncryptionLevel, onlyAck, ackAllowed bool, v protocol.VersionNumber) (*wire.ExtendedHeader, payload) { if onlyAck { if ack := p.acks.GetAckFrame(encLevel, true); ack != nil { - var payload payload - payload.ack = ack - payload.length = ack.Length(p.version) - return p.getLongHeader(encLevel), &payload + return p.getLongHeader(encLevel, v), payload{ + ack: ack, + length: ack.Length(v), + } } - return nil, nil + return nil, payload{} } var s cryptoStream @@ -507,75 +570,77 @@ func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, en } if !hasData && !hasRetransmission && ack == nil { // nothing to send - return nil, nil + return nil, payload{} } - var payload payload + var pl payload if ack != nil { - payload.ack = ack - payload.length = ack.Length(p.version) - maxPacketSize -= payload.length + pl.ack = ack + pl.length = ack.Length(v) + maxPacketSize -= pl.length } - hdr := p.getLongHeader(encLevel) - maxPacketSize -= hdr.GetLength(p.version) + hdr := p.getLongHeader(encLevel, v) + maxPacketSize -= hdr.GetLength(v) if hasRetransmission { for { var f wire.Frame //nolint:exhaustive // 0-RTT packets can't contain any retransmission.s switch encLevel { case protocol.EncryptionInitial: - f = p.retransmissionQueue.GetInitialFrame(maxPacketSize) + f = p.retransmissionQueue.GetInitialFrame(maxPacketSize, v) case protocol.EncryptionHandshake: - f = p.retransmissionQueue.GetHandshakeFrame(maxPacketSize) + f = 
p.retransmissionQueue.GetHandshakeFrame(maxPacketSize, v) } if f == nil { break } - payload.frames = append(payload.frames, ackhandler.Frame{Frame: f}) - frameLen := f.Length(p.version) - payload.length += frameLen + af := ackhandler.GetFrame() + af.Frame = f + pl.frames = append(pl.frames, af) + frameLen := f.Length(v) + pl.length += frameLen maxPacketSize -= frameLen } } else if s.HasData() { cf := s.PopCryptoFrame(maxPacketSize) - payload.frames = []ackhandler.Frame{{Frame: cf}} - payload.length += cf.Length(p.version) + pl.frames = []*ackhandler.Frame{{Frame: cf}} + pl.length += cf.Length(v) } - return hdr, &payload + return hdr, pl } -func (p *packetPacker) maybeGetAppDataPacketFor0RTT(sealer sealer, maxPacketSize protocol.ByteCount) (*wire.ExtendedHeader, *payload) { +func (p *packetPacker) maybeGetAppDataPacketFor0RTT(sealer sealer, maxPacketSize protocol.ByteCount, v protocol.VersionNumber) (*wire.ExtendedHeader, payload) { if p.perspective != protocol.PerspectiveClient { - return nil, nil + return nil, payload{} } - hdr := p.getLongHeader(protocol.Encryption0RTT) - maxPayloadSize := maxPacketSize - hdr.GetLength(p.version) - protocol.ByteCount(sealer.Overhead()) - payload := p.maybeGetAppDataPacket(maxPayloadSize, false, false) - return hdr, payload + hdr := p.getLongHeader(protocol.Encryption0RTT, v) + maxPayloadSize := maxPacketSize - hdr.GetLength(v) - protocol.ByteCount(sealer.Overhead()) + return hdr, p.maybeGetAppDataPacket(maxPayloadSize, false, false, v) } -func (p *packetPacker) maybeGetShortHeaderPacket(sealer handshake.ShortHeaderSealer, maxPacketSize protocol.ByteCount, onlyAck, ackAllowed bool) (*wire.ExtendedHeader, *payload) { - hdr := p.getShortHeader(sealer.KeyPhase()) - maxPayloadSize := maxPacketSize - hdr.GetLength(p.version) - protocol.ByteCount(sealer.Overhead()) - payload := p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, ackAllowed) - return hdr, payload +func (p *packetPacker) maybeGetShortHeaderPacket(sealer handshake.ShortHeaderSealer, hdrLen protocol.ByteCount, maxPacketSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.VersionNumber) payload { + maxPayloadSize := maxPacketSize - hdrLen - protocol.ByteCount(sealer.Overhead()) + return p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, ackAllowed, v) } -func (p *packetPacker) maybeGetAppDataPacket(maxPayloadSize protocol.ByteCount, onlyAck, ackAllowed bool) *payload { - payload := p.composeNextPacket(maxPayloadSize, onlyAck, ackAllowed) +func (p *packetPacker) maybeGetAppDataPacket(maxPayloadSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.VersionNumber) payload { + pl := p.composeNextPacket(maxPayloadSize, onlyAck, ackAllowed, v) // check if we have anything to send - if len(payload.frames) == 0 { - if payload.ack == nil { - return nil + if len(pl.frames) == 0 { + if pl.ack == nil { + return payload{} } // the packet only contains an ACK if p.numNonAckElicitingAcks >= protocol.MaxNonAckElicitingAcks { ping := &wire.PingFrame{} // don't retransmit the PING frame when it is lost - payload.frames = append(payload.frames, ackhandler.Frame{Frame: ping, OnLost: func(wire.Frame) {}}) - payload.length += ping.Length(p.version) + af := ackhandler.GetFrame() + af.Frame = ping + af.OnLost = func(wire.Frame) {} + pl.frames = append(pl.frames, af) + pl.length += ping.Length(v) p.numNonAckElicitingAcks = 0 } else { p.numNonAckElicitingAcks++ @@ -583,21 +648,21 @@ func (p *packetPacker) maybeGetAppDataPacket(maxPayloadSize protocol.ByteCount, } else { p.numNonAckElicitingAcks = 0 } - return 
payload + return pl } -func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAck, ackAllowed bool) *payload { +func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.VersionNumber) payload { if onlyAck { if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, true); ack != nil { - payload := &payload{} - payload.ack = ack - payload.length += ack.Length(p.version) - return payload + return payload{ + ack: ack, + length: ack.Length(v), + } } - return &payload{} + return payload{} } - payload := &payload{frames: make([]ackhandler.Frame, 0, 1)} + pl := payload{frames: make([]*ackhandler.Frame, 0, 1)} hasData := p.framer.HasData() hasRetransmission := p.retransmissionQueue.HasAppData() @@ -605,61 +670,92 @@ func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAc var hasAck bool if ackAllowed { if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, !hasRetransmission && !hasData); ack != nil { - payload.ack = ack - payload.length += ack.Length(p.version) + pl.ack = ack + pl.length += ack.Length(v) hasAck = true } } if p.datagramQueue != nil { if f := p.datagramQueue.Peek(); f != nil { - size := f.Length(p.version) - if size <= maxFrameSize-payload.length { - payload.frames = append(payload.frames, ackhandler.Frame{ - Frame: f, - // set it to a no-op. Then we won't set the default callback, which would retransmit the frame. - OnLost: func(wire.Frame) {}, - }) - payload.length += size + size := f.Length(v) + if size <= maxFrameSize-pl.length { + af := ackhandler.GetFrame() + af.Frame = f + // set it to a no-op. Then we won't set the default callback, which would retransmit the frame. + af.OnLost = func(wire.Frame) {} + pl.frames = append(pl.frames, af) + pl.length += size p.datagramQueue.Pop() } } } if hasAck && !hasData && !hasRetransmission { - return payload + return pl } if hasRetransmission { for { - remainingLen := maxFrameSize - payload.length + remainingLen := maxFrameSize - pl.length if remainingLen < protocol.MinStreamFrameSize { break } - f := p.retransmissionQueue.GetAppDataFrame(remainingLen) + f := p.retransmissionQueue.GetAppDataFrame(remainingLen, v) if f == nil { break } - payload.frames = append(payload.frames, ackhandler.Frame{Frame: f}) - payload.length += f.Length(p.version) + af := ackhandler.GetFrame() + af.Frame = f + pl.frames = append(pl.frames, af) + pl.length += f.Length(v) } } if hasData { var lengthAdded protocol.ByteCount - payload.frames, lengthAdded = p.framer.AppendControlFrames(payload.frames, maxFrameSize-payload.length) - payload.length += lengthAdded + pl.frames, lengthAdded = p.framer.AppendControlFrames(pl.frames, maxFrameSize-pl.length, v) + pl.length += lengthAdded - payload.frames, lengthAdded = p.framer.AppendStreamFrames(payload.frames, maxFrameSize-payload.length) - payload.length += lengthAdded + pl.frames, lengthAdded = p.framer.AppendStreamFrames(pl.frames, maxFrameSize-pl.length, v) + pl.length += lengthAdded } - return payload + return pl } -func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel) (*packedPacket, error) { +func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (*coalescedPacket, error) { + if encLevel == protocol.Encryption1RTT { + s, err := p.cryptoSetup.Get1RTTSealer() + if err != nil { + return nil, err + } + kp := s.KeyPhase() + connID := p.getDestConnID() + pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + hdrLen := 
wire.ShortHeaderLen(connID, pnLen) + pl := p.maybeGetAppDataPacket(p.maxPacketSize-protocol.ByteCount(s.Overhead())-hdrLen, false, true, v) + if pl.length == 0 { + return nil, nil + } + buffer := getPacketBuffer() + packet := &coalescedPacket{buffer: buffer} + ap, ack, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, 0, s, false, v) + if err != nil { + return nil, err + } + packet.shortHdrPacket = &shortHeaderPacket{ + Packet: ap, + DestConnID: connID, + Ack: ack, + PacketNumberLen: pnLen, + KeyPhase: kp, + } + return packet, nil + } + var hdr *wire.ExtendedHeader - var payload *payload - var sealer sealer + var pl payload + var sealer handshake.LongHeaderSealer //nolint:exhaustive // Probe packets are never sent for 0-RTT. switch encLevel { case protocol.EncryptionInitial: @@ -668,85 +764,71 @@ func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel) ( if err != nil { return nil, err } - hdr, payload = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionInitial, false, true) + hdr, pl = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionInitial, false, true, v) case protocol.EncryptionHandshake: var err error sealer, err = p.cryptoSetup.GetHandshakeSealer() if err != nil { return nil, err } - hdr, payload = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionHandshake, false, true) - case protocol.Encryption1RTT: - oneRTTSealer, err := p.cryptoSetup.Get1RTTSealer() - if err != nil { - return nil, err - } - sealer = oneRTTSealer - hdr = p.getShortHeader(oneRTTSealer.KeyPhase()) - payload = p.maybeGetAppDataPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead())-hdr.GetLength(p.version), false, true) + hdr, pl = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionHandshake, false, true, v) default: panic("unknown encryption level") } - if payload == nil { + + if pl.length == 0 { return nil, nil } - size := p.packetLength(hdr, payload) + protocol.ByteCount(sealer.Overhead()) + buffer := getPacketBuffer() + packet := &coalescedPacket{buffer: buffer} + size := p.longHeaderPacketLength(hdr, pl, v) + protocol.ByteCount(sealer.Overhead()) var padding protocol.ByteCount if encLevel == protocol.EncryptionInitial { - padding = p.initialPaddingLen(payload.frames, size) + padding = p.initialPaddingLen(pl.frames, size) } - buffer := getPacketBuffer() - cont, err := p.appendPacket(buffer, hdr, payload, padding, encLevel, sealer, false) + + longHdrPacket, err := p.appendLongHeaderPacket(buffer, hdr, pl, padding, encLevel, sealer, v) if err != nil { return nil, err } - return &packedPacket{ - buffer: buffer, - packetContents: cont, - }, nil + packet.longHdrPackets = []*longHeaderPacket{longHdrPacket} + return packet, nil } -func (p *packetPacker) PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount) (*packedPacket, error) { - payload := &payload{ - frames: []ackhandler.Frame{ping}, - length: ping.Length(p.version), +func (p *packetPacker) PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount, now time.Time, v protocol.VersionNumber) (shortHeaderPacket, *packetBuffer, error) { + pl := payload{ + frames: []*ackhandler.Frame{&ping}, + length: ping.Length(v), } buffer := getPacketBuffer() - sealer, err := p.cryptoSetup.Get1RTTSealer() + s, err := p.cryptoSetup.Get1RTTSealer() if err != nil { - return nil, err + return shortHeaderPacket{}, nil, err } - hdr := 
p.getShortHeader(sealer.KeyPhase()) - padding := size - p.packetLength(hdr, payload) - protocol.ByteCount(sealer.Overhead()) - contents, err := p.appendPacket(buffer, hdr, payload, padding, protocol.Encryption1RTT, sealer, true) + connID := p.getDestConnID() + pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + padding := size - p.shortHeaderPacketLength(connID, pnLen, pl) - protocol.ByteCount(s.Overhead()) + kp := s.KeyPhase() + ap, ack, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, padding, s, true, v) if err != nil { - return nil, err + return shortHeaderPacket{}, nil, err } - contents.isMTUProbePacket = true - return &packedPacket{ - buffer: buffer, - packetContents: contents, - }, nil -} - -func (p *packetPacker) getShortHeader(kp protocol.KeyPhaseBit) *wire.ExtendedHeader { - pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) - hdr := &wire.ExtendedHeader{} - hdr.PacketNumber = pn - hdr.PacketNumberLen = pnLen - hdr.DestConnectionID = p.getDestConnID() - hdr.KeyPhase = kp - return hdr + return shortHeaderPacket{ + Packet: ap, + DestConnID: connID, + Ack: ack, + PacketNumberLen: pnLen, + KeyPhase: kp, + }, buffer, nil } -func (p *packetPacker) getLongHeader(encLevel protocol.EncryptionLevel) *wire.ExtendedHeader { +func (p *packetPacker) getLongHeader(encLevel protocol.EncryptionLevel, v protocol.VersionNumber) *wire.ExtendedHeader { pn, pnLen := p.pnManager.PeekPacketNumber(encLevel) hdr := &wire.ExtendedHeader{ PacketNumber: pn, PacketNumberLen: pnLen, } - hdr.IsLongHeader = true - hdr.Version = p.version + hdr.Version = v hdr.SrcConnectionID = p.srcConnID hdr.DestConnectionID = p.getDestConnID() @@ -763,28 +845,114 @@ func (p *packetPacker) getLongHeader(encLevel protocol.EncryptionLevel) *wire.Ex return hdr } -func (p *packetPacker) appendPacket(buffer *packetBuffer, header *wire.ExtendedHeader, payload *payload, padding protocol.ByteCount, encLevel protocol.EncryptionLevel, sealer sealer, isMTUProbePacket bool) (*packetContents, error) { +func (p *packetPacker) appendLongHeaderPacket(buffer *packetBuffer, header *wire.ExtendedHeader, pl payload, padding protocol.ByteCount, encLevel protocol.EncryptionLevel, sealer sealer, v protocol.VersionNumber) (*longHeaderPacket, error) { var paddingLen protocol.ByteCount pnLen := protocol.ByteCount(header.PacketNumberLen) - if payload.length < 4-pnLen { - paddingLen = 4 - pnLen - payload.length + if pl.length < 4-pnLen { + paddingLen = 4 - pnLen - pl.length } paddingLen += padding - if header.IsLongHeader { - header.Length = pnLen + protocol.ByteCount(sealer.Overhead()) + payload.length + paddingLen + header.Length = pnLen + protocol.ByteCount(sealer.Overhead()) + pl.length + paddingLen + + startLen := len(buffer.Data) + raw := buffer.Data[startLen:] + raw, err := header.Append(raw, v) + if err != nil { + return nil, err + } + payloadOffset := protocol.ByteCount(len(raw)) + + pn := p.pnManager.PopPacketNumber(encLevel) + if pn != header.PacketNumber { + return nil, errors.New("packetPacker BUG: Peeked and Popped packet numbers do not match") } - hdrOffset := buffer.Len() - buf := bytes.NewBuffer(buffer.Data) - if err := header.Write(buf, p.version); err != nil { + raw, err = p.appendPacketPayload(raw, pl, paddingLen, v) + if err != nil { return nil, err } - payloadOffset := buf.Len() - raw := buffer.Data[:payloadOffset] + raw = p.encryptPacket(raw, sealer, pn, payloadOffset, pnLen) + buffer.Data = buffer.Data[:len(buffer.Data)+len(raw)] + + return &longHeaderPacket{ + header: header, + 
ack: pl.ack, + frames: pl.frames, + length: protocol.ByteCount(len(raw)), + }, nil +} + +func (p *packetPacker) appendShortHeaderPacket( + buffer *packetBuffer, + connID protocol.ConnectionID, + pn protocol.PacketNumber, + pnLen protocol.PacketNumberLen, + kp protocol.KeyPhaseBit, + pl payload, + padding protocol.ByteCount, + sealer sealer, + isMTUProbePacket bool, + v protocol.VersionNumber, +) (*ackhandler.Packet, *wire.AckFrame, error) { + var paddingLen protocol.ByteCount + if pl.length < 4-protocol.ByteCount(pnLen) { + paddingLen = 4 - protocol.ByteCount(pnLen) - pl.length + } + paddingLen += padding - if payload.ack != nil { + startLen := len(buffer.Data) + raw := buffer.Data[startLen:] + raw, err := wire.AppendShortHeader(raw, connID, pn, pnLen, kp) + if err != nil { + return nil, nil, err + } + payloadOffset := protocol.ByteCount(len(raw)) + + if pn != p.pnManager.PopPacketNumber(protocol.Encryption1RTT) { + return nil, nil, errors.New("packetPacker BUG: Peeked and Popped packet numbers do not match") + } + + raw, err = p.appendPacketPayload(raw, pl, paddingLen, v) + if err != nil { + return nil, nil, err + } + if !isMTUProbePacket { + if size := protocol.ByteCount(len(raw) + sealer.Overhead()); size > p.maxPacketSize { + return nil, nil, fmt.Errorf("PacketPacker BUG: packet too large (%d bytes, allowed %d bytes)", size, p.maxPacketSize) + } + } + raw = p.encryptPacket(raw, sealer, pn, payloadOffset, protocol.ByteCount(pnLen)) + buffer.Data = buffer.Data[:len(buffer.Data)+len(raw)] + + // create the ackhandler.Packet + largestAcked := protocol.InvalidPacketNumber + if pl.ack != nil { + largestAcked = pl.ack.LargestAcked() + } + for i := range pl.frames { + if pl.frames[i].OnLost != nil { + continue + } + pl.frames[i].OnLost = p.retransmissionQueue.AddAppData + } + + ap := ackhandler.GetPacket() + ap.PacketNumber = pn + ap.LargestAcked = largestAcked + ap.Frames = pl.frames + ap.Length = protocol.ByteCount(len(raw)) + ap.EncryptionLevel = protocol.Encryption1RTT + ap.SendTime = time.Now() + ap.IsPathMTUProbePacket = isMTUProbePacket + + return ap, pl.ack, nil +} + +func (p *packetPacker) appendPacketPayload(raw []byte, pl payload, paddingLen protocol.ByteCount, v protocol.VersionNumber) ([]byte, error) { + payloadOffset := len(raw) + if pl.ack != nil { var err error - raw, err = payload.ack.Append(raw, p.version) + raw, err = pl.ack.Append(raw, v) if err != nil { return nil, err } @@ -792,41 +960,27 @@ func (p *packetPacker) appendPacket(buffer *packetBuffer, header *wire.ExtendedH if paddingLen > 0 { raw = append(raw, make([]byte, paddingLen)...) 
} - for _, frame := range payload.frames { + for _, frame := range pl.frames { var err error - raw, err = frame.Append(raw, p.version) + raw, err = frame.Append(raw, v) if err != nil { return nil, err } } - if payloadSize := protocol.ByteCount(len(raw)-payloadOffset) - paddingLen; payloadSize != payload.length { - return nil, fmt.Errorf("PacketPacker BUG: payload size inconsistent (expected %d, got %d bytes)", payload.length, payloadSize) - } - if !isMTUProbePacket { - if size := protocol.ByteCount(len(raw) + sealer.Overhead()); size > p.maxPacketSize { - return nil, fmt.Errorf("PacketPacker BUG: packet too large (%d bytes, allowed %d bytes)", size, p.maxPacketSize) - } + if payloadSize := protocol.ByteCount(len(raw)-payloadOffset) - paddingLen; payloadSize != pl.length { + return nil, fmt.Errorf("PacketPacker BUG: payload size inconsistent (expected %d, got %d bytes)", pl.length, payloadSize) } + return raw, nil +} - // encrypt the packet - _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], header.PacketNumber, raw[hdrOffset:payloadOffset]) - raw = raw[0 : len(raw)+sealer.Overhead()] +func (p *packetPacker) encryptPacket(raw []byte, sealer sealer, pn protocol.PacketNumber, payloadOffset, pnLen protocol.ByteCount) []byte { + _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], pn, raw[:payloadOffset]) + raw = raw[:len(raw)+sealer.Overhead()] // apply header protection - pnOffset := payloadOffset - int(header.PacketNumberLen) - sealer.EncryptHeader(raw[pnOffset+4:pnOffset+4+16], &raw[hdrOffset], raw[pnOffset:payloadOffset]) - buffer.Data = raw - - num := p.pnManager.PopPacketNumber(encLevel) - if num != header.PacketNumber { - return nil, errors.New("packetPacker BUG: Peeked and Popped packet numbers do not match") - } - return &packetContents{ - header: header, - ack: payload.ack, - frames: payload.frames, - length: buffer.Len() - hdrOffset, - }, nil + pnOffset := payloadOffset - pnLen + sealer.EncryptHeader(raw[pnOffset+4:pnOffset+4+16], &raw[0], raw[pnOffset:payloadOffset]) + return raw } func (p *packetPacker) SetToken(token []byte) { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/packet_unpacker.go b/vendor/github.com/Psiphon-Labs/quic-go/packet_unpacker.go index d790aa050..dc9e208b4 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/packet_unpacker.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/packet_unpacker.go @@ -38,16 +38,14 @@ type packetUnpacker struct { cs handshake.CryptoSetup shortHdrConnIDLen int - version protocol.VersionNumber } var _ unpacker = &packetUnpacker{} -func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int, version protocol.VersionNumber) unpacker { +func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int) *packetUnpacker { return &packetUnpacker{ cs: cs, shortHdrConnIDLen: shortHdrConnIDLen, - version: version, } } @@ -55,7 +53,7 @@ func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int, version // If the reserved bits are invalid, the error is wire.ErrInvalidReservedBits. // If any other error occurred when parsing the header, the error is of type headerParseError. // If decrypting the payload fails for any reason, the error is the error returned by the AEAD. 
-func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte) (*unpackedPacket, error) { +func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.VersionNumber) (*unpackedPacket, error) { var encLevel protocol.EncryptionLevel var extHdr *wire.ExtendedHeader var decrypted []byte @@ -67,7 +65,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d if err != nil { return nil, err } - extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v) if err != nil { return nil, err } @@ -77,7 +75,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d if err != nil { return nil, err } - extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v) if err != nil { return nil, err } @@ -87,7 +85,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d if err != nil { return nil, err } - extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v) if err != nil { return nil, err } @@ -127,8 +125,8 @@ func (u *packetUnpacker) UnpackShortHeader(rcvTime time.Time, data []byte) (prot return pn, pnLen, kp, decrypted, nil } -func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, []byte, error) { - extHdr, parseErr := u.unpackLongHeader(opener, hdr, data) +func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte, v protocol.VersionNumber) (*wire.ExtendedHeader, []byte, error) { + extHdr, parseErr := u.unpackLongHeader(opener, hdr, data, v) // If the reserved bits are set incorrectly, we still need to continue unpacking. // This avoids a timing side-channel, which otherwise might allow an attacker // to gain information about the header encryption. @@ -189,15 +187,15 @@ func (u *packetUnpacker) unpackShortHeader(hd headerDecryptor, data []byte) (int } // The error is either nil, a wire.ErrInvalidReservedBits or of type headerParseError. -func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, error) { - extHdr, err := unpackLongHeader(hd, hdr, data, u.version) +func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v protocol.VersionNumber) (*wire.ExtendedHeader, error) { + extHdr, err := unpackLongHeader(hd, hdr, data, v) if err != nil && err != wire.ErrInvalidReservedBits { return nil, &headerParseError{err: err} } return extHdr, err } -func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, version protocol.VersionNumber) (*wire.ExtendedHeader, error) { +func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v protocol.VersionNumber) (*wire.ExtendedHeader, error) { r := bytes.NewReader(data) hdrLen := hdr.ParsedLen() @@ -216,7 +214,7 @@ func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, version data[hdrLen:hdrLen+4], ) // 3. 
parse the header (and learn the actual length of the packet number) - extHdr, parseErr := hdr.ParseExtended(r, version) + extHdr, parseErr := hdr.ParseExtended(r, v) if parseErr != nil && parseErr != wire.ErrInvalidReservedBits { return nil, parseErr } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/quicvarint/varint.go b/vendor/github.com/Psiphon-Labs/quic-go/quicvarint/varint.go index 297c6b9d0..25d2074c3 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/quicvarint/varint.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/quicvarint/varint.go @@ -107,32 +107,32 @@ func Append(b []byte, i uint64) []byte { panic(fmt.Sprintf("%#x doesn't fit into 62 bits", i)) } -// WriteWithLen writes i in the QUIC varint format with the desired length to w. -func WriteWithLen(w Writer, i uint64, length protocol.ByteCount) { +// AppendWithLen append i in the QUIC varint format with the desired length. +func AppendWithLen(b []byte, i uint64, length protocol.ByteCount) []byte { if length != 1 && length != 2 && length != 4 && length != 8 { panic("invalid varint length") } l := Len(i) if l == length { - Write(w, i) - return + return Append(b, i) } if l > length { panic(fmt.Sprintf("cannot encode %d in %d bytes", i, length)) } if length == 2 { - w.WriteByte(0b01000000) + b = append(b, 0b01000000) } else if length == 4 { - w.WriteByte(0b10000000) + b = append(b, 0b10000000) } else if length == 8 { - w.WriteByte(0b11000000) + b = append(b, 0b11000000) } for j := protocol.ByteCount(1); j < length-l; j++ { - w.WriteByte(0) + b = append(b, 0) } for j := protocol.ByteCount(0); j < l; j++ { - w.WriteByte(uint8(i >> (8 * (l - 1 - j)))) + b = append(b, uint8(i>>(8*(l-1-j)))) } + return b } // Len determines the number of bytes that will be needed to write the number i. diff --git a/vendor/github.com/Psiphon-Labs/quic-go/receive_stream.go b/vendor/github.com/Psiphon-Labs/quic-go/receive_stream.go index ffd95a624..808dbc433 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/receive_stream.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/receive_stream.go @@ -44,14 +44,13 @@ type receiveStream struct { closedForShutdown bool // set when CloseForShutdown() is called finRead bool // set once we read a frame with a Fin canceledRead bool // set when CancelRead() is called - resetRemotely bool // set when HandleResetStreamFrame() is called + resetRemotely bool // set when handleResetStreamFrame() is called readChan chan struct{} readOnce chan struct{} // cap: 1, to protect against concurrent use of Read deadline time.Time flowController flowcontrol.StreamFlowController - version protocol.VersionNumber } var ( @@ -63,7 +62,6 @@ func newReceiveStream( streamID protocol.StreamID, sender streamSender, flowController flowcontrol.StreamFlowController, - version protocol.VersionNumber, ) *receiveStream { return &receiveStream{ streamID: streamID, @@ -73,7 +71,6 @@ func newReceiveStream( readChan: make(chan struct{}, 1), readOnce: make(chan struct{}, 1), finalOffset: protocol.MaxByteCount, - version: version, } } @@ -218,7 +215,7 @@ func (s *receiveStream) cancelReadImpl(errorCode qerr.StreamErrorCode) bool /* c return false } s.canceledRead = true - s.cancelReadErr = fmt.Errorf("Read on stream %d canceled with error code %d", s.streamID, errorCode) + s.cancelReadErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: false} s.signalRead() s.sender.queueControlFrame(&wire.StopSendingFrame{ StreamID: s.streamID, @@ -290,6 +287,7 @@ func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame) 
s.resetRemotelyErr = &StreamError{ StreamID: s.streamID, ErrorCode: frame.ErrorCode, + Remote: true, } s.signalRead() return newlyRcvdFinalOffset, nil diff --git a/vendor/github.com/Psiphon-Labs/quic-go/retransmission_queue.go b/vendor/github.com/Psiphon-Labs/quic-go/retransmission_queue.go index 22eb6091c..5b44917f3 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/retransmission_queue.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/retransmission_queue.go @@ -15,12 +15,10 @@ type retransmissionQueue struct { handshakeCryptoData []*wire.CryptoFrame appData []wire.Frame - - version protocol.VersionNumber } -func newRetransmissionQueue(ver protocol.VersionNumber) *retransmissionQueue { - return &retransmissionQueue{version: ver} +func newRetransmissionQueue() *retransmissionQueue { + return &retransmissionQueue{} } func (q *retransmissionQueue) AddInitial(f wire.Frame) { @@ -58,10 +56,10 @@ func (q *retransmissionQueue) AddAppData(f wire.Frame) { q.appData = append(q.appData, f) } -func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount) wire.Frame { +func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount, v protocol.VersionNumber) wire.Frame { if len(q.initialCryptoData) > 0 { f := q.initialCryptoData[0] - newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, q.version) + newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, v) if newFrame == nil && !needsSplit { // the whole frame fits q.initialCryptoData = q.initialCryptoData[1:] return f @@ -74,17 +72,17 @@ func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount) wire.Fr return nil } f := q.initial[0] - if f.Length(q.version) > maxLen { + if f.Length(v) > maxLen { return nil } q.initial = q.initial[1:] return f } -func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount) wire.Frame { +func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount, v protocol.VersionNumber) wire.Frame { if len(q.handshakeCryptoData) > 0 { f := q.handshakeCryptoData[0] - newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, q.version) + newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, v) if newFrame == nil && !needsSplit { // the whole frame fits q.handshakeCryptoData = q.handshakeCryptoData[1:] return f @@ -97,19 +95,19 @@ func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount) wire. 
return nil } f := q.handshake[0] - if f.Length(q.version) > maxLen { + if f.Length(v) > maxLen { return nil } q.handshake = q.handshake[1:] return f } -func (q *retransmissionQueue) GetAppDataFrame(maxLen protocol.ByteCount) wire.Frame { +func (q *retransmissionQueue) GetAppDataFrame(maxLen protocol.ByteCount, v protocol.VersionNumber) wire.Frame { if len(q.appData) == 0 { return nil } f := q.appData[0] - if f.Length(q.version) > maxLen { + if f.Length(v) > maxLen { return nil } q.appData = q.appData[1:] diff --git a/vendor/github.com/Psiphon-Labs/quic-go/send_stream.go b/vendor/github.com/Psiphon-Labs/quic-go/send_stream.go index 9658bf5b5..d28c5f394 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/send_stream.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/send_stream.go @@ -18,7 +18,7 @@ type sendStreamI interface { SendStream handleStopSendingFrame(*wire.StopSendingFrame) hasData() bool - popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Frame, bool) + popStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*ackhandler.Frame, bool) closeForShutdown(error) updateSendWindow(protocol.ByteCount) } @@ -54,8 +54,6 @@ type sendStream struct { deadline time.Time flowController flowcontrol.StreamFlowController - - version protocol.VersionNumber } var ( @@ -67,7 +65,6 @@ func newSendStream( streamID protocol.StreamID, sender streamSender, flowController flowcontrol.StreamFlowController, - version protocol.VersionNumber, ) *sendStream { s := &sendStream{ streamID: streamID, @@ -75,7 +72,6 @@ func newSendStream( flowController: flowController, writeChan: make(chan struct{}, 1), writeOnce: make(chan struct{}, 1), // cap: 1, to protect against concurrent use of Write - version: version, } s.ctx, s.ctxCancel = context.WithCancel(context.Background()) return s @@ -204,9 +200,9 @@ func (s *sendStream) canBufferStreamFrame() bool { // popStreamFrame returns the next STREAM frame that is supposed to be sent on this stream // maxBytes is the maximum length this frame (including frame header) will have. 
-func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Frame, bool /* has more data to send */) { +func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*ackhandler.Frame, bool /* has more data to send */) { s.mutex.Lock() - f, hasMoreData := s.popNewOrRetransmittedStreamFrame(maxBytes) + f, hasMoreData := s.popNewOrRetransmittedStreamFrame(maxBytes, v) if f != nil { s.numOutstandingFrames++ } @@ -215,16 +211,20 @@ func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Fr if f == nil { return nil, hasMoreData } - return &ackhandler.Frame{Frame: f, OnLost: s.queueRetransmission, OnAcked: s.frameAcked}, hasMoreData + af := ackhandler.GetFrame() + af.Frame = f + af.OnLost = s.queueRetransmission + af.OnAcked = s.frameAcked + return af, hasMoreData } -func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount) (*wire.StreamFrame, bool /* has more data to send */) { +func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*wire.StreamFrame, bool /* has more data to send */) { if s.canceledWrite || s.closeForShutdownErr != nil { return nil, false } if len(s.retransmissionQueue) > 0 { - f, hasMoreRetransmissions := s.maybeGetRetransmission(maxBytes) + f, hasMoreRetransmissions := s.maybeGetRetransmission(maxBytes, v) if f != nil || hasMoreRetransmissions { if f == nil { return nil, true @@ -260,7 +260,7 @@ func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCoun return nil, true } - f, hasMoreData := s.popNewStreamFrame(maxBytes, sendWindow) + f, hasMoreData := s.popNewStreamFrame(maxBytes, sendWindow, v) if dataLen := f.DataLen(); dataLen > 0 { s.writeOffset += f.DataLen() s.flowController.AddBytesSent(f.DataLen()) @@ -272,12 +272,12 @@ func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCoun return f, hasMoreData } -func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount) (*wire.StreamFrame, bool) { +func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount, v protocol.VersionNumber) (*wire.StreamFrame, bool) { if s.nextFrame != nil { nextFrame := s.nextFrame s.nextFrame = nil - maxDataLen := utils.Min(sendWindow, nextFrame.MaxDataLen(maxBytes, s.version)) + maxDataLen := utils.Min(sendWindow, nextFrame.MaxDataLen(maxBytes, v)) if nextFrame.DataLen() > maxDataLen { s.nextFrame = wire.GetStreamFrame() s.nextFrame.StreamID = s.streamID @@ -299,7 +299,7 @@ func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount) f.DataLenPresent = true f.Data = f.Data[:0] - hasMoreData := s.popNewStreamFrameWithoutBuffer(f, maxBytes, sendWindow) + hasMoreData := s.popNewStreamFrameWithoutBuffer(f, maxBytes, sendWindow, v) if len(f.Data) == 0 && !f.Fin { f.PutBack() return nil, hasMoreData @@ -307,8 +307,8 @@ func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount) return f, hasMoreData } -func (s *sendStream) popNewStreamFrameWithoutBuffer(f *wire.StreamFrame, maxBytes, sendWindow protocol.ByteCount) bool { - maxDataLen := f.MaxDataLen(maxBytes, s.version) +func (s *sendStream) popNewStreamFrameWithoutBuffer(f *wire.StreamFrame, maxBytes, sendWindow protocol.ByteCount, v protocol.VersionNumber) bool { + maxDataLen := f.MaxDataLen(maxBytes, v) if maxDataLen == 0 { // a STREAM frame must have at least one byte of data return s.dataForWriting != nil || s.nextFrame != nil || s.finishedWriting } @@ -317,9 +317,9 @@ func (s 
*sendStream) popNewStreamFrameWithoutBuffer(f *wire.StreamFrame, maxByte return s.dataForWriting != nil || s.nextFrame != nil || s.finishedWriting } -func (s *sendStream) maybeGetRetransmission(maxBytes protocol.ByteCount) (*wire.StreamFrame, bool /* has more retransmissions */) { +func (s *sendStream) maybeGetRetransmission(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*wire.StreamFrame, bool /* has more retransmissions */) { f := s.retransmissionQueue[0] - newFrame, needsSplit := f.MaybeSplitOffFrame(maxBytes, s.version) + newFrame, needsSplit := f.MaybeSplitOffFrame(maxBytes, v) if needsSplit { return newFrame, true } @@ -416,11 +416,11 @@ func (s *sendStream) Close() error { } func (s *sendStream) CancelWrite(errorCode StreamErrorCode) { - s.cancelWriteImpl(errorCode, fmt.Errorf("Write on stream %d canceled with error code %d", s.streamID, errorCode)) + s.cancelWriteImpl(errorCode, false) } // must be called after locking the mutex -func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, writeErr error) { +func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, remote bool) { s.mutex.Lock() if s.canceledWrite { s.mutex.Unlock() @@ -428,7 +428,7 @@ func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, writeErr er } s.ctxCancel() s.canceledWrite = true - s.cancelWriteErr = writeErr + s.cancelWriteErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: remote} s.numOutstandingFrames = 0 s.retransmissionQueue = nil newlyCompleted := s.isNewlyCompleted() @@ -457,10 +457,7 @@ func (s *sendStream) updateSendWindow(limit protocol.ByteCount) { } func (s *sendStream) handleStopSendingFrame(frame *wire.StopSendingFrame) { - s.cancelWriteImpl(frame.ErrorCode, &StreamError{ - StreamID: s.streamID, - ErrorCode: frame.ErrorCode, - }) + s.cancelWriteImpl(frame.ErrorCode, true) } func (s *sendStream) Context() context.Context { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/server.go b/vendor/github.com/Psiphon-Labs/quic-go/server.go index a6cffa965..d7865c657 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/server.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/server.go @@ -1,7 +1,6 @@ package quic import ( - "bytes" "context" "crypto/rand" "crypto/tls" @@ -89,7 +88,6 @@ type baseServer struct { *Config, *tls.Config, *handshake.TokenGenerator, - bool, /* enable 0-RTT */ bool, /* client address validated by an address validation token */ logging.ConnectionTracer, uint64, @@ -350,7 +348,7 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s } // If we're creating a new connection, the packet will be passed to the connection. // The header will then be parsed again. 
- hdr, _, _, err := wire.ParsePacket(p.data, s.config.ConnectionIDGenerator.ConnectionIDLen()) + hdr, _, _, err := wire.ParsePacket(p.data) if err != nil { if s.config.Tracer != nil { s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropHeaderParseError) @@ -358,10 +356,6 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s s.logger.Debugf("Error parsing packet: %s", err) return false } - // Short header packets should never end up here in the first place - if !hdr.IsLongHeader { - panic(fmt.Sprintf("misrouted packet: %#v", hdr)) - } // [Psiphon] // To accomodate additional messages, obfuscated QUIC packets may reserve @@ -380,7 +374,7 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s return false } - if hdr.IsLongHeader && hdr.Type != protocol.PacketTypeInitial { + if hdr.Type != protocol.PacketTypeInitial { // Drop long header packets. // There's little point in sending a Stateless Reset, since the client // might not have received the token yet. @@ -474,8 +468,11 @@ func (s *stubCryptoSetup) Get1RTTSealer() (handshake.ShortHeaderSealer, error) { // frame, and calls Config.VerifyClientHelloRandom. func (s *baseServer) verifyClientHelloRandom(p *receivedPacket, hdr *wire.Header) error { + // TODO: support QUICv2 + versionNumber := protocol.Version1 + _, initialOpener := handshake.NewInitialAEAD( - hdr.DestConnectionID, protocol.PerspectiveServer, protocol.Version1) + hdr.DestConnectionID, protocol.PerspectiveServer, versionNumber) cs := &stubCryptoSetup{ initialOpener: initialOpener, @@ -485,17 +482,17 @@ func (s *baseServer) verifyClientHelloRandom(p *receivedPacket, hdr *wire.Header // original packet data must be retained for subsequent processing. data := append([]byte(nil), p.data...) 
- unpacker := newPacketUnpacker(cs, 0, protocol.Version1) - unpacked, err := unpacker.UnpackLongHeader(hdr, p.rcvTime, data) + unpacker := newPacketUnpacker(cs, 0) + unpacked, err := unpacker.UnpackLongHeader(hdr, p.rcvTime, data, versionNumber) if err != nil { return fmt.Errorf("verifyClientHelloRandom: UnpackLongHeader: %w", err) } - parser := wire.NewFrameParser(s.config.EnableDatagrams, protocol.Version1) + parser := wire.NewFrameParser(s.config.EnableDatagrams) d := unpacked.data for len(d) > 0 { - l, frame, err := parser.ParseNext(d, protocol.EncryptionInitial) + l, frame, err := parser.ParseNext(d, protocol.EncryptionInitial, versionNumber) if err != nil { return fmt.Errorf("verifyClientHelloRandom: ParseNext: %w", err) } @@ -653,7 +650,6 @@ func (s *baseServer) handleInitialImpl(p *receivedPacket, hdr *wire.Header) erro s.config, s.tlsConf, s.tokenGenerator, - s.acceptEarlyConns, clientAddrIsValid, tracer, tracingID, @@ -715,7 +711,6 @@ func (s *baseServer) sendRetry(remoteAddr net.Addr, hdr *wire.Header, info *pack return err } replyHdr := &wire.ExtendedHeader{} - replyHdr.IsLongHeader = true replyHdr.Type = protocol.PacketTypeRetry replyHdr.Version = hdr.Version replyHdr.SrcConnectionID = srcConnID @@ -727,19 +722,19 @@ func (s *baseServer) sendRetry(remoteAddr net.Addr, hdr *wire.Header, info *pack replyHdr.Log(s.logger) } - packetBuffer := getPacketBuffer() - defer packetBuffer.Release() - buf := bytes.NewBuffer(packetBuffer.Data) - if err := replyHdr.Write(buf, hdr.Version); err != nil { + buf := getPacketBuffer() + defer buf.Release() + buf.Data, err = replyHdr.Append(buf.Data, hdr.Version) + if err != nil { return err } // append the Retry integrity tag - tag := handshake.GetRetryIntegrityTag(buf.Bytes(), hdr.DestConnectionID, hdr.Version) - buf.Write(tag[:]) + tag := handshake.GetRetryIntegrityTag(buf.Data, hdr.DestConnectionID, hdr.Version) + buf.Data = append(buf.Data, tag[:]...) 
if s.config.Tracer != nil { - s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(buf.Len()), nil) + s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(buf.Data)), nil) } - _, err = s.conn.WritePacket(buf.Bytes(), remoteAddr, info.OOB()) + _, err = s.conn.WritePacket(buf.Data, remoteAddr, info.OOB()) return err } @@ -777,47 +772,46 @@ func (s *baseServer) sendConnectionRefused(remoteAddr net.Addr, hdr *wire.Header // sendError sends the error as a response to the packet received with header hdr func (s *baseServer) sendError(remoteAddr net.Addr, hdr *wire.Header, sealer handshake.LongHeaderSealer, errorCode qerr.TransportErrorCode, info *packetInfo) error { - packetBuffer := getPacketBuffer() - defer packetBuffer.Release() - buf := bytes.NewBuffer(packetBuffer.Data) + b := getPacketBuffer() + defer b.Release() ccf := &wire.ConnectionCloseFrame{ErrorCode: uint64(errorCode)} replyHdr := &wire.ExtendedHeader{} - replyHdr.IsLongHeader = true replyHdr.Type = protocol.PacketTypeInitial replyHdr.Version = hdr.Version replyHdr.SrcConnectionID = hdr.DestConnectionID replyHdr.DestConnectionID = hdr.SrcConnectionID replyHdr.PacketNumberLen = protocol.PacketNumberLen4 replyHdr.Length = 4 /* packet number len */ + ccf.Length(hdr.Version) + protocol.ByteCount(sealer.Overhead()) - if err := replyHdr.Write(buf, hdr.Version); err != nil { + var err error + b.Data, err = replyHdr.Append(b.Data, hdr.Version) + if err != nil { return err } - payloadOffset := buf.Len() + payloadOffset := len(b.Data) - raw := buf.Bytes() - raw, err := ccf.Append(raw, hdr.Version) + b.Data, err = ccf.Append(b.Data, hdr.Version) if err != nil { return err } - _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], replyHdr.PacketNumber, raw[:payloadOffset]) - raw = raw[0 : len(raw)+sealer.Overhead()] + _ = sealer.Seal(b.Data[payloadOffset:payloadOffset], b.Data[payloadOffset:], replyHdr.PacketNumber, b.Data[:payloadOffset]) + b.Data = b.Data[0 : len(b.Data)+sealer.Overhead()] pnOffset := payloadOffset - int(replyHdr.PacketNumberLen) sealer.EncryptHeader( - raw[pnOffset+4:pnOffset+4+16], - &raw[0], - raw[pnOffset:payloadOffset], + b.Data[pnOffset+4:pnOffset+4+16], + &b.Data[0], + b.Data[pnOffset:payloadOffset], ) replyHdr.Log(s.logger) wire.LogFrame(s.logger, ccf, true) if s.config.Tracer != nil { - s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(raw)), []logging.Frame{ccf}) + s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(b.Data)), []logging.Frame{ccf}) } - _, err = s.conn.WritePacket(raw, remoteAddr, info.OOB()) + _, err = s.conn.WritePacket(b.Data, remoteAddr, info.OOB()) return err } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/stream.go b/vendor/github.com/Psiphon-Labs/quic-go/stream.go index 6c01c7f29..a418b10fa 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/stream.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/stream.go @@ -60,7 +60,7 @@ type streamI interface { // for sending hasData() bool handleStopSendingFrame(*wire.StopSendingFrame) - popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Frame, bool) + popStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*ackhandler.Frame, bool) updateSendWindow(protocol.ByteCount) } @@ -80,8 +80,6 @@ type stream struct { sender streamSender receiveStreamCompleted bool sendStreamCompleted bool - - version protocol.VersionNumber } var _ Stream = &stream{} @@ -90,9 +88,8 @@ var _ Stream = &stream{} func newStream(streamID 
protocol.StreamID, sender streamSender, flowController flowcontrol.StreamFlowController, - version protocol.VersionNumber, ) *stream { - s := &stream{sender: sender, version: version} + s := &stream{sender: sender} senderForSendStream := &uniStreamSender{ streamSender: sender, onStreamCompletedImpl: func() { @@ -102,7 +99,7 @@ func newStream(streamID protocol.StreamID, s.completedMutex.Unlock() }, } - s.sendStream = *newSendStream(streamID, senderForSendStream, flowController, version) + s.sendStream = *newSendStream(streamID, senderForSendStream, flowController) senderForReceiveStream := &uniStreamSender{ streamSender: sender, onStreamCompletedImpl: func() { @@ -112,7 +109,7 @@ func newStream(streamID protocol.StreamID, s.completedMutex.Unlock() }, } - s.receiveStream = *newReceiveStream(streamID, senderForReceiveStream, flowController, version) + s.receiveStream = *newReceiveStream(streamID, senderForReceiveStream, flowController) return s } diff --git a/vendor/github.com/Psiphon-Labs/quic-go/streams_map.go b/vendor/github.com/Psiphon-Labs/quic-go/streams_map.go index 09cebd568..ae61db970 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/streams_map.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/streams_map.go @@ -46,7 +46,6 @@ var errTooManyOpenStreams = errors.New("too many open streams") type streamsMap struct { perspective protocol.Perspective - version protocol.VersionNumber maxIncomingBidiStreams uint64 maxIncomingUniStreams uint64 @@ -70,7 +69,6 @@ func newStreamsMap( maxIncomingBidiStreams uint64, maxIncomingUniStreams uint64, perspective protocol.Perspective, - version protocol.VersionNumber, ) streamManager { m := &streamsMap{ perspective: perspective, @@ -78,7 +76,6 @@ func newStreamsMap( maxIncomingBidiStreams: maxIncomingBidiStreams, maxIncomingUniStreams: maxIncomingUniStreams, sender: sender, - version: version, } m.initMaps() return m @@ -89,7 +86,7 @@ func (m *streamsMap) initMaps() { protocol.StreamTypeBidi, func(num protocol.StreamNum) streamI { id := num.StreamID(protocol.StreamTypeBidi, m.perspective) - return newStream(id, m.sender, m.newFlowController(id), m.version) + return newStream(id, m.sender, m.newFlowController(id)) }, m.sender.queueControlFrame, ) @@ -97,7 +94,7 @@ func (m *streamsMap) initMaps() { protocol.StreamTypeBidi, func(num protocol.StreamNum) streamI { id := num.StreamID(protocol.StreamTypeBidi, m.perspective.Opposite()) - return newStream(id, m.sender, m.newFlowController(id), m.version) + return newStream(id, m.sender, m.newFlowController(id)) }, m.maxIncomingBidiStreams, m.sender.queueControlFrame, @@ -106,7 +103,7 @@ func (m *streamsMap) initMaps() { protocol.StreamTypeUni, func(num protocol.StreamNum) sendStreamI { id := num.StreamID(protocol.StreamTypeUni, m.perspective) - return newSendStream(id, m.sender, m.newFlowController(id), m.version) + return newSendStream(id, m.sender, m.newFlowController(id)) }, m.sender.queueControlFrame, ) @@ -114,7 +111,7 @@ func (m *streamsMap) initMaps() { protocol.StreamTypeUni, func(num protocol.StreamNum) receiveStreamI { id := num.StreamID(protocol.StreamTypeUni, m.perspective.Opposite()) - return newReceiveStream(id, m.sender, m.newFlowController(id), m.version) + return newReceiveStream(id, m.sender, m.newFlowController(id)) }, m.maxIncomingUniStreams, m.sender.queueControlFrame, diff --git a/vendor/github.com/Psiphon-Labs/quic-go/sys_conn_df_linux.go b/vendor/github.com/Psiphon-Labs/quic-go/sys_conn_df_linux.go index 0b48a212e..a31a3d4a5 100644 --- 
a/vendor/github.com/Psiphon-Labs/quic-go/sys_conn_df_linux.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/sys_conn_df_linux.go @@ -6,8 +6,9 @@ import ( "errors" "syscall" - "github.com/Psiphon-Labs/quic-go/internal/utils" "golang.org/x/sys/unix" + + "github.com/Psiphon-Labs/quic-go/internal/utils" ) func setDF(rawConn syscall.RawConn) error { diff --git a/vendor/github.com/Psiphon-Labs/quic-go/sys_conn_df_windows.go b/vendor/github.com/Psiphon-Labs/quic-go/sys_conn_df_windows.go index 0767df0bb..d0648afee 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/sys_conn_df_windows.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/sys_conn_df_windows.go @@ -6,8 +6,9 @@ import ( "errors" "syscall" - "github.com/Psiphon-Labs/quic-go/internal/utils" "golang.org/x/sys/windows" + + "github.com/Psiphon-Labs/quic-go/internal/utils" ) const ( diff --git a/vendor/github.com/Psiphon-Labs/quic-go/token_store.go b/vendor/github.com/Psiphon-Labs/quic-go/token_store.go index 4f17c047d..bb370bc18 100644 --- a/vendor/github.com/Psiphon-Labs/quic-go/token_store.go +++ b/vendor/github.com/Psiphon-Labs/quic-go/token_store.go @@ -1,10 +1,10 @@ package quic import ( - "container/list" "sync" "github.com/Psiphon-Labs/quic-go/internal/utils" + list "github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist" ) type singleOriginTokenStore struct { @@ -48,8 +48,8 @@ type lruTokenStoreEntry struct { type lruTokenStore struct { mutex sync.Mutex - m map[string]*list.Element - q *list.List + m map[string]*list.Element[*lruTokenStoreEntry] + q *list.List[*lruTokenStoreEntry] capacity int singleOriginSize int } @@ -61,8 +61,8 @@ var _ TokenStore = &lruTokenStore{} // tokensPerOrigin specifies the maximum number of tokens per origin. func NewLRUTokenStore(maxOrigins, tokensPerOrigin int) TokenStore { return &lruTokenStore{ - m: make(map[string]*list.Element), - q: list.New(), + m: make(map[string]*list.Element[*lruTokenStoreEntry]), + q: list.New[*lruTokenStoreEntry](), capacity: maxOrigins, singleOriginSize: tokensPerOrigin, } @@ -73,7 +73,7 @@ func (s *lruTokenStore) Put(key string, token *ClientToken) { defer s.mutex.Unlock() if el, ok := s.m[key]; ok { - entry := el.Value.(*lruTokenStoreEntry) + entry := el.Value entry.cache.Add(token) s.q.MoveToFront(el) return @@ -90,7 +90,7 @@ func (s *lruTokenStore) Put(key string, token *ClientToken) { } elem := s.q.Back() - entry := elem.Value.(*lruTokenStoreEntry) + entry := elem.Value delete(s.m, entry.key) entry.key = key entry.cache = newSingleOriginTokenStore(s.singleOriginSize) @@ -106,7 +106,7 @@ func (s *lruTokenStore) Pop(key string) *ClientToken { var token *ClientToken if el, ok := s.m[key]; ok { s.q.MoveToFront(el) - cache := el.Value.(*lruTokenStoreEntry).cache + cache := el.Value.cache token = cache.Pop() if cache.Len() == 0 { s.q.Remove(el) diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/.editorconfig new file mode 100644 index 000000000..b0c95367e --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.editorconfig @@ -0,0 +1,14 @@ +# editorconfig.org + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = tab +indent_size = 8 + +[*.{md,yml,yaml,json}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/.gitattributes new file mode 100644 index 000000000..176a458f9 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.gitattributes @@ -0,0 +1 @@ +* text=auto 
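The token_store.go hunks above swap container/list for quic-go's generic internal/utils/linkedlist, so list elements carry a typed Value and the el.Value.(*lruTokenStoreEntry) assertions drop out of Put and Pop. A minimal, self-contained sketch of that pattern follows; Element and List here are illustrative stand-ins (the real internal package cannot be imported from outside quic-go), not the vendored API.

```go
package main

import "fmt"

// Element and List are stand-ins for a generics-based linked list such as
// quic-go's internal/utils/linkedlist; names and shape are illustrative only.
type Element[T any] struct {
	Value T
	next  *Element[T]
}

type List[T any] struct{ front *Element[T] }

// PushFront inserts v at the front of the list and returns its element.
func (l *List[T]) PushFront(v T) *Element[T] {
	el := &Element[T]{Value: v, next: l.front}
	l.front = el
	return el
}

type lruTokenStoreEntry struct{ key string }

func main() {
	var q List[*lruTokenStoreEntry]
	el := q.PushFront(&lruTokenStoreEntry{key: "example.org"})
	// With a typed element there is no el.Value.(*lruTokenStoreEntry)
	// assertion; el.Value is already a *lruTokenStoreEntry.
	fmt.Println(el.Value.key)
}
```

This is the same move that lets the diffed Put and Pop read el.Value.cache directly instead of asserting the element's concrete type first.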
diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/.gitignore new file mode 100644 index 000000000..5e3002f88 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md new file mode 100644 index 000000000..61d8ebffc --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md @@ -0,0 +1,364 @@ +# Changelog + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
+ +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
+ +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/LICENSE.txt new file mode 100644 index 000000000..f311b1eaa --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2013-2020 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/README.md new file mode 100644 index 000000000..72579471f --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/README.md @@ -0,0 +1,73 @@ +# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) + +Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with +all functions that depend on external (non standard library) or crypto packages +removed. +The reason for this is to make this library more lightweight. Most of these +functions (specially crypto ones) are not needed on most apps, but costs a lot +in terms of binary size and compilation time. + +## Usage + +**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for +detailed instructions and code snippets for the >100 template functions available. 
+ +**Go developers**: If you'd like to include Slim-Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig). + +For standard usage, read on. + +### Load the Slim-Sprig library + +To load the Slim-Sprig `FuncMap`: + +```go + +import ( + "html/template" + + "github.com/go-task/slim-sprig" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/Taskfile.yml new file mode 100644 index 000000000..cdcfd223b --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/Taskfile.yml @@ -0,0 +1,12 @@ +# https://taskfile.dev + +version: '2' + +tasks: + default: + cmds: + - task: test + + test: + cmds: + - go test -v . diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/crypto.go new file mode 100644 index 000000000..d06e516d4 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/crypto.go @@ -0,0 +1,24 @@ +package sprig + +import ( + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash/adler32" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/date.go new file mode 100644 index 000000000..ed022ddac --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. 
+func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/defaults.go new file mode 100644 index 000000000..b9f979666 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. 
+// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. +func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. 
+func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. +func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/dict.go new file mode 100644 index 000000000..77ebc61b1 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/dict.go @@ -0,0 +1,118 @@ +package sprig + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/doc.go new file mode 100644 index 000000000..aabb9d448 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. 
+ +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/functions.go new file mode 100644 index 000000000..5ea74f899 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/functions.go @@ -0,0 +1,317 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. +func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. +var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" 
}, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. + "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. + "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. 
+ "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. + "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/go-task/slim-sprig/list.go b/vendor/github.com/go-task/slim-sprig/list.go new file mode 100644 index 000000000..ca0fbb789 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. 
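A minimal sketch of how the list helpers in this file are typically reached from a template. The program below is illustrative only: the template source, the import alias, and the wiring through TxtFuncMap are assumptions, not part of this file.

    package main

    import (
        "os"
        "text/template"

        sprig "github.com/go-task/slim-sprig"
    )

    func main() {
        // Pipe a literal list through the helpers defined below: uniq
        // deduplicates, reverse flips the order, join stringifies and joins.
        const src = `{{ list 1 2 3 2 1 | uniq | reverse | join "-" }}`
        tpl := template.Must(template.New("demo").Funcs(sprig.TxtFuncMap()).Parse(src))
        if err := tpl.Execute(os.Stdout, nil); err != nil { // prints "3-2-1"
            panic(err)
        }
    }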
+ +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find 
rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) + if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/network.go new file mode 100644 index 000000000..108d78a94 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/numeric.go new file mode 100644 index 000000000..98cbb37a1 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/numeric.go @@ -0,0 +1,228 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseFloat(str, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + 
switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return float64(val.Int()) + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return float64(val.Uint()) + case reflect.Uint, reflect.Uint64: + return float64(val.Uint()) + case reflect.Float32, reflect.Float64: + return val.Float() + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func toInt(v interface{}) int { + //It's not optimal. Bud I don't want duplicate toInt64 code. + return int(toInt64(v)) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return val.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(val.Uint()) + case reflect.Uint, reflect.Uint64: + tv := val.Uint() + if tv <= math.MaxInt64 { + return int64(tv) + } + // TODO: What is the sensible thing to do here? + return math.MaxInt64 + case reflect.Float32, reflect.Float64: + return int64(val.Float()) + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + 
if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/reflect.go new file mode 100644 index 000000000..8a65c132f --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. +func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/regex.go new file mode 100644 index 000000000..fab551018 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/go-task/slim-sprig/strings.go 
b/vendor/github.com/go-task/slim-sprig/strings.go new file mode 100644 index 000000000..3c62d6b6f --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/strings.go @@ -0,0 +1,189 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) +} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return 
res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. +func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/url.go new file mode 100644 index 000000000..b8e120e19 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/google/pprof/AUTHORS b/vendor/github.com/google/pprof/AUTHORS new file mode 100644 index 000000000..fd736cb1c --- /dev/null +++ b/vendor/github.com/google/pprof/AUTHORS @@ -0,0 +1,7 @@ +# This is the official list of pprof authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. +Google Inc. \ No newline at end of file diff --git a/vendor/github.com/google/pprof/CONTRIBUTORS b/vendor/github.com/google/pprof/CONTRIBUTORS new file mode 100644 index 000000000..8c8c37d2c --- /dev/null +++ b/vendor/github.com/google/pprof/CONTRIBUTORS @@ -0,0 +1,16 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. 
+# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name +Raul Silvera +Tipp Moseley +Hyoun Kyu Cho +Martin Spier +Taco de Wolff +Andrew Hunter diff --git a/vendor/github.com/google/pprof/LICENSE b/vendor/github.com/google/pprof/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/google/pprof/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go new file mode 100644 index 000000000..ab7f03ae2 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -0,0 +1,567 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "errors" + "sort" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. 
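As a conceptual sketch of the string interning that preEncode builds up and postDecode later unwinds: every string is replaced by an int64 index into a deduplicated table, and index 0 is reserved for the empty string. The snippet below illustrates that idea under those assumptions; it is not this package's API, and all names are hypothetical.

    package main

    import "fmt"

    func main() {
        // Messages store only indices; the table itself is written once.
        index := map[string]int64{"": 0} // index 0 is always ""
        table := []string{""}
        intern := func(s string) int64 {
            if i, ok := index[s]; ok {
                return i
            }
            i := int64(len(table))
            index[s] = i
            table = append(table, s)
            return i
        }

        fileX := intern("/usr/bin/app")
        buildIDX := intern("deadbeef")
        fmt.Println(fileX, buildIDX)               // 1 2
        fmt.Println(table[fileX], table[buildIDX]) // /usr/bin/app deadbeef
    }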
+func (p *Profile) preEncode() { + strings := make(map[string]int) + addString(strings, "") + + for _, st := range p.SampleType { + st.typeX = addString(strings, st.Type) + st.unitX = addString(strings, st.Unit) + } + + for _, s := range p.Sample { + s.labelX = nil + var keys []string + for k := range s.Label { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := s.Label[k] + for _, v := range vs { + s.labelX = append(s.labelX, + label{ + keyX: addString(strings, k), + strX: addString(strings, v), + }, + ) + } + } + var numKeys []string + for k := range s.NumLabel { + numKeys = append(numKeys, k) + } + sort.Strings(numKeys) + for _, k := range numKeys { + keyX := addString(strings, k) + vs := s.NumLabel[k] + units := s.NumUnit[k] + for i, v := range vs { + var unitX int64 + if len(units) != 0 { + unitX = addString(strings, units[i]) + } + s.labelX = append(s.labelX, + label{ + keyX: keyX, + numX: v, + unitX: unitX, + }, + ) + } + } + s.locationIDX = make([]uint64, len(s.Location)) + for i, loc := range s.Location { + s.locationIDX[i] = loc.ID + } + } + + for _, m := range p.Mapping { + m.fileX = addString(strings, m.File) + m.buildIDX = addString(strings, m.BuildID) + } + + for _, l := range p.Location { + for i, ln := range l.Line { + if ln.Function != nil { + l.Line[i].functionIDX = ln.Function.ID + } else { + l.Line[i].functionIDX = 0 + } + } + if l.Mapping != nil { + l.mappingIDX = l.Mapping.ID + } else { + l.mappingIDX = 0 + } + } + for _, f := range p.Function { + f.nameX = addString(strings, f.Name) + f.systemNameX = addString(strings, f.SystemName) + f.filenameX = addString(strings, f.Filename) + } + + p.dropFramesX = addString(strings, p.DropFrames) + p.keepFramesX = addString(strings, p.KeepFrames) + + if pt := p.PeriodType; pt != nil { + pt.typeX = addString(strings, pt.Type) + pt.unitX = addString(strings, pt.Unit) + } + + p.commentX = nil + for _, c := range p.Comments { + p.commentX = append(p.commentX, addString(strings, c)) + } + + p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + + p.stringTable = make([]string, len(strings)) + for s, i := range strings { + p.stringTable[i] = s + } +} + +func (p *Profile) encode(b *buffer) { + for _, x := range p.SampleType { + encodeMessage(b, 1, x) + } + for _, x := range p.Sample { + encodeMessage(b, 2, x) + } + for _, x := range p.Mapping { + encodeMessage(b, 3, x) + } + for _, x := range p.Location { + encodeMessage(b, 4, x) + } + for _, x := range p.Function { + encodeMessage(b, 5, x) + } + encodeStrings(b, 6, p.stringTable) + encodeInt64Opt(b, 7, p.dropFramesX) + encodeInt64Opt(b, 8, p.keepFramesX) + encodeInt64Opt(b, 9, p.TimeNanos) + encodeInt64Opt(b, 10, p.DurationNanos) + if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) { + encodeMessage(b, 11, p.PeriodType) + } + encodeInt64Opt(b, 12, p.Period) + encodeInt64s(b, 13, p.commentX) + encodeInt64(b, 14, p.defaultSampleTypeX) +} + +var profileDecoder = []decoder{ + nil, // 0 + // repeated ValueType sample_type = 1 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.SampleType = append(pp.SampleType, x) + return decodeMessage(b, x) + }, + // repeated Sample sample = 2 + func(b *buffer, m message) error { + x := new(Sample) + pp := m.(*Profile) + pp.Sample = append(pp.Sample, x) + return decodeMessage(b, x) + }, + // repeated Mapping mapping = 3 + func(b *buffer, m message) error { + x := new(Mapping) + pp := m.(*Profile) + pp.Mapping = append(pp.Mapping, x) + return decodeMessage(b, x) 
+ }, + // repeated Location location = 4 + func(b *buffer, m message) error { + x := new(Location) + x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer + pp := m.(*Profile) + pp.Location = append(pp.Location, x) + err := decodeMessage(b, x) + var tmp []Line + x.Line = append(tmp, x.Line...) // Shrink to allocated size + return err + }, + // repeated Function function = 5 + func(b *buffer, m message) error { + x := new(Function) + pp := m.(*Profile) + pp.Function = append(pp.Function, x) + return decodeMessage(b, x) + }, + // repeated string string_table = 6 + func(b *buffer, m message) error { + err := decodeStrings(b, &m.(*Profile).stringTable) + if err != nil { + return err + } + if m.(*Profile).stringTable[0] != "" { + return errors.New("string_table[0] must be ''") + } + return nil + }, + // int64 drop_frames = 7 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) }, + // int64 keep_frames = 8 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) }, + // int64 time_nanos = 9 + func(b *buffer, m message) error { + if m.(*Profile).TimeNanos != 0 { + return errConcatProfile + } + return decodeInt64(b, &m.(*Profile).TimeNanos) + }, + // int64 duration_nanos = 10 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) }, + // ValueType period_type = 11 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.PeriodType = x + return decodeMessage(b, x) + }, + // int64 period = 12 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) }, + // repeated int64 comment = 13 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, + // int64 defaultSampleType = 14 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, +} + +// postDecode takes the unexported fields populated by decode (with +// suffix X) and populates the corresponding exported fields. +// The unexported fields are cleared up to facilitate testing. 
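The ID resolution in postDecode below prefers a dense, preallocated slice and falls back to a map for sparse or out-of-range IDs. A standalone sketch of that lookup pattern, with made-up types and IDs:

    package main

    import "fmt"

    type mapping struct {
        id   uint64
        file string
    }

    func main() {
        ms := []*mapping{{1, "libc.so"}, {2, "app"}, {900, "plugin.so"}}

        // IDs below len(byIndex) resolve through the slice; the rest go
        // through the map.
        byIndex := make([]*mapping, len(ms)+1)
        byID := map[uint64]*mapping{}
        for _, m := range ms {
            if m.id < uint64(len(byIndex)) {
                byIndex[m.id] = m
            } else {
                byID[m.id] = m
            }
        }

        lookup := func(id uint64) *mapping {
            if id < uint64(len(byIndex)) && byIndex[id] != nil {
                return byIndex[id]
            }
            return byID[id]
        }

        fmt.Println(lookup(2).file, lookup(900).file) // app plugin.so
    }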
+func (p *Profile) postDecode() error { + var err error + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + mappingIds := make([]*Mapping, len(p.Mapping)+1) + for _, m := range p.Mapping { + m.File, err = getString(p.stringTable, &m.fileX, err) + m.BuildID, err = getString(p.stringTable, &m.buildIDX, err) + if m.ID < uint64(len(mappingIds)) { + mappingIds[m.ID] = m + } else { + mappings[m.ID] = m + } + } + + functions := make(map[uint64]*Function, len(p.Function)) + functionIds := make([]*Function, len(p.Function)+1) + for _, f := range p.Function { + f.Name, err = getString(p.stringTable, &f.nameX, err) + f.SystemName, err = getString(p.stringTable, &f.systemNameX, err) + f.Filename, err = getString(p.stringTable, &f.filenameX, err) + if f.ID < uint64(len(functionIds)) { + functionIds[f.ID] = f + } else { + functions[f.ID] = f + } + } + + locations := make(map[uint64]*Location, len(p.Location)) + locationIds := make([]*Location, len(p.Location)+1) + for _, l := range p.Location { + if id := l.mappingIDX; id < uint64(len(mappingIds)) { + l.Mapping = mappingIds[id] + } else { + l.Mapping = mappings[id] + } + l.mappingIDX = 0 + for i, ln := range l.Line { + if id := ln.functionIDX; id != 0 { + l.Line[i].functionIDX = 0 + if id < uint64(len(functionIds)) { + l.Line[i].Function = functionIds[id] + } else { + l.Line[i].Function = functions[id] + } + } + } + if l.ID < uint64(len(locationIds)) { + locationIds[l.ID] = l + } else { + locations[l.ID] = l + } + } + + for _, st := range p.SampleType { + st.Type, err = getString(p.stringTable, &st.typeX, err) + st.Unit, err = getString(p.stringTable, &st.unitX, err) + } + + for _, s := range p.Sample { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) + } + } + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } + s.Location = make([]*Location, len(s.locationIDX)) + for i, lid := range s.locationIDX { + if lid < uint64(len(locationIds)) { + s.Location[i] = locationIds[lid] + } else { + s.Location[i] = locations[lid] + } + } + s.locationIDX = nil + } + + p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err) + p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err) + + if pt := p.PeriodType; pt == nil { + p.PeriodType = &ValueType{} + } + + if pt := p.PeriodType; pt != nil { + pt.Type, err = getString(p.stringTable, &pt.typeX, err) + pt.Unit, err = getString(p.stringTable, &pt.unitX, err) + } + + for _, i := range p.commentX { + var c string + c, err = getString(p.stringTable, &i, err) + p.Comments = append(p.Comments, c) + } + + p.commentX = nil + p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.stringTable = nil + return err +} + +// 
padStringArray pads arr with enough empty strings to make arr +// length l when arr's length is less than l. +func padStringArray(arr []string, l int) []string { + if l <= len(arr) { + return arr + } + return append(arr, make([]string, l-len(arr))...) +} + +func (p *ValueType) decoder() []decoder { + return valueTypeDecoder +} + +func (p *ValueType) encode(b *buffer) { + encodeInt64Opt(b, 1, p.typeX) + encodeInt64Opt(b, 2, p.unitX) +} + +var valueTypeDecoder = []decoder{ + nil, // 0 + // optional int64 type = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) }, + // optional int64 unit = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) }, +} + +func (p *Sample) decoder() []decoder { + return sampleDecoder +} + +func (p *Sample) encode(b *buffer) { + encodeUint64s(b, 1, p.locationIDX) + encodeInt64s(b, 2, p.Value) + for _, x := range p.labelX { + encodeMessage(b, 3, x) + } +} + +var sampleDecoder = []decoder{ + nil, // 0 + // repeated uint64 location = 1 + func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) }, + // repeated int64 value = 2 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) }, + // repeated Label label = 3 + func(b *buffer, m message) error { + s := m.(*Sample) + n := len(s.labelX) + s.labelX = append(s.labelX, label{}) + return decodeMessage(b, &s.labelX[n]) + }, +} + +func (p label) decoder() []decoder { + return labelDecoder +} + +func (p label) encode(b *buffer) { + encodeInt64Opt(b, 1, p.keyX) + encodeInt64Opt(b, 2, p.strX) + encodeInt64Opt(b, 3, p.numX) + encodeInt64Opt(b, 4, p.unitX) +} + +var labelDecoder = []decoder{ + nil, // 0 + // optional int64 key = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) }, + // optional int64 str = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) }, + // optional int64 num = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) }, + // optional int64 num = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) }, +} + +func (p *Mapping) decoder() []decoder { + return mappingDecoder +} + +func (p *Mapping) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.Start) + encodeUint64Opt(b, 3, p.Limit) + encodeUint64Opt(b, 4, p.Offset) + encodeInt64Opt(b, 5, p.fileX) + encodeInt64Opt(b, 6, p.buildIDX) + encodeBoolOpt(b, 7, p.HasFunctions) + encodeBoolOpt(b, 8, p.HasFilenames) + encodeBoolOpt(b, 9, p.HasLineNumbers) + encodeBoolOpt(b, 10, p.HasInlineFrames) +} + +var mappingDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7 + func(b *buffer, m message) error { return decodeBool(b, 
&m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10 +} + +func (p *Location) decoder() []decoder { + return locationDecoder +} + +func (p *Location) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.mappingIDX) + encodeUint64Opt(b, 3, p.Address) + for i := range p.Line { + encodeMessage(b, 4, &p.Line[i]) + } + encodeBoolOpt(b, 5, p.IsFolded) +} + +var locationDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3; + func(b *buffer, m message) error { // repeated Line line = 4 + pp := m.(*Location) + n := len(pp.Line) + pp.Line = append(pp.Line, Line{}) + return decodeMessage(b, &pp.Line[n]) + }, + func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5; +} + +func (p *Line) decoder() []decoder { + return lineDecoder +} + +func (p *Line) encode(b *buffer) { + encodeUint64Opt(b, 1, p.functionIDX) + encodeInt64Opt(b, 2, p.Line) +} + +var lineDecoder = []decoder{ + nil, // 0 + // optional uint64 function_id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, + // optional int64 line = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, +} + +func (p *Function) decoder() []decoder { + return functionDecoder +} + +func (p *Function) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeInt64Opt(b, 2, p.nameX) + encodeInt64Opt(b, 3, p.systemNameX) + encodeInt64Opt(b, 4, p.filenameX) + encodeInt64Opt(b, 5, p.StartLine) +} + +var functionDecoder = []decoder{ + nil, // 0 + // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) }, + // optional int64 function_name = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) }, + // optional int64 function_system_name = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) }, + // repeated int64 filename = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) }, + // optional int64 start_line = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) }, +} + +func addString(strings map[string]int, s string) int64 { + i, ok := strings[s] + if !ok { + i = len(strings) + strings[s] = i + } + return int64(i) +} + +func getString(strings []string, strng *int64, err error) (string, error) { + if err != nil { + return "", err + } + s := int(*strng) + if s < 0 || s >= len(strings) { + return "", errMalformed + } + *strng = 0 + return strings[s], nil +} diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go new file mode 100644 index 000000000..ea8e66c68 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/filter.go @@ -0,0 +1,270 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +// Implements methods to filter samples from profiles. + +import "regexp" + +// FilterSamplesByName filters the samples in a profile and only keeps +// samples where at least one frame matches focus but none match ignore. +// Returns true is the corresponding regexp matched at least one sample. +func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { + focusOrIgnore := make(map[uint64]bool) + hidden := make(map[uint64]bool) + for _, l := range p.Location { + if ignore != nil && l.matchesName(ignore) { + im = true + focusOrIgnore[l.ID] = false + } else if focus == nil || l.matchesName(focus) { + fm = true + focusOrIgnore[l.ID] = true + } + + if hide != nil && l.matchesName(hide) { + hm = true + l.Line = l.unmatchedLines(hide) + if len(l.Line) == 0 { + hidden[l.ID] = true + } + } + if show != nil { + l.Line = l.matchedLines(show) + if len(l.Line) == 0 { + hidden[l.ID] = true + } else { + hnm = true + } + } + } + + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + if focusedAndNotIgnored(sample.Location, focusOrIgnore) { + if len(hidden) > 0 { + var locs []*Location + for _, loc := range sample.Location { + if !hidden[loc.ID] { + locs = append(locs, loc) + } + } + if len(locs) == 0 { + // Remove sample with no locations (by not adding it to s). + continue + } + sample.Location = locs + } + s = append(s, sample) + } + } + p.Sample = s + + return +} + +// ShowFrom drops all stack frames above the highest matching frame and returns +// whether a match was found. If showFrom is nil it returns false and does not +// modify the profile. +// +// Example: consider a sample with frames [A, B, C, B], where A is the root. +// ShowFrom(nil) returns false and has frames [A, B, C, B]. +// ShowFrom(A) returns true and has frames [A, B, C, B]. +// ShowFrom(B) returns true and has frames [B, C, B]. +// ShowFrom(C) returns true and has frames [C, B]. +// ShowFrom(D) returns false and drops the sample because no frames remain. +func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) { + if showFrom == nil { + return false + } + // showFromLocs stores location IDs that matched ShowFrom. + showFromLocs := make(map[uint64]bool) + // Apply to locations. + for _, loc := range p.Location { + if filterShowFromLocation(loc, showFrom) { + showFromLocs[loc.ID] = true + matched = true + } + } + // For all samples, strip locations after the highest matching one. + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + for i := len(sample.Location) - 1; i >= 0; i-- { + if showFromLocs[sample.Location[i].ID] { + sample.Location = sample.Location[:i+1] + s = append(s, sample) + break + } + } + } + p.Sample = s + return matched +} + +// filterShowFromLocation tests a showFrom regex against a location, removes +// lines after the last match and returns whether a match was found. If the +// mapping is matched, then all lines are kept. 
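A minimal usage sketch for the ShowFrom filter defined above. It assumes the package's exported Parse function, defined in another file of this package, and a hypothetical profile file name; the regexp is only an example.

    package main

    import (
        "log"
        "os"
        "regexp"

        "github.com/google/pprof/profile"
    )

    func main() {
        f, err := os.Open("cpu.pprof") // hypothetical input file
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        p, err := profile.Parse(f) // Parse is assumed from elsewhere in this package
        if err != nil {
            log.Fatal(err)
        }

        // Keep only the frames at or below the highest frame whose function
        // or file matches; samples with no matching frame are dropped.
        if !p.ShowFrom(regexp.MustCompile(`runtime\.main`)) {
            log.Println("no frames matched runtime.main")
        }
    }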
+func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { + if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { + return true + } + if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { + loc.Line = loc.Line[:i+1] + return true + } + return false +} + +// lastMatchedLineIndex returns the index of the last line that matches a regex, +// or -1 if no match is found. +func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { + for i := len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return i + } + } + } + return -1 +} + +// FilterTagsByName filters the tags in a profile and only keeps +// tags that match show and not hide. +func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. +func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression. +func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. +func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. 
+ return false + } + } + } + return f +} + +// TagMatch selects tags for filtering +type TagMatch func(s *Sample) bool + +// FilterSamplesByTag removes all samples from the profile, except +// those that match focus and do not match the ignore regular +// expression. +func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { + samples := make([]*Sample, 0, len(p.Sample)) + for _, s := range p.Sample { + focused, ignored := true, false + if focus != nil { + focused = focus(s) + } + if ignore != nil { + ignored = ignore(s) + } + fm = fm || focused + im = im || ignored + if focused && !ignored { + samples = append(samples, s) + } + } + p.Sample = samples + return +} diff --git a/vendor/github.com/google/pprof/profile/index.go b/vendor/github.com/google/pprof/profile/index.go new file mode 100644 index 000000000..bef1d6046 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/index.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "strconv" + "strings" +) + +// SampleIndexByName returns the appropriate index for a value of sample index. +// If numeric, it returns the number, otherwise it looks up the text in the +// profile sample types. +func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { + if sampleIndex == "" { + if dst := p.DefaultSampleType; dst != "" { + for i, t := range sampleTypes(p) { + if t == dst { + return i, nil + } + } + } + // By default select the last sample value + return len(p.SampleType) - 1, nil + } + if i, err := strconv.Atoi(sampleIndex); err == nil { + if i < 0 || i >= len(p.SampleType) { + return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) + } + return i, nil + } + + // Remove the inuse_ prefix to support legacy pprof options + // "inuse_space" and "inuse_objects" for profiles containing types + // "space" and "objects". + noInuse := strings.TrimPrefix(sampleIndex, "inuse_") + for i, t := range p.SampleType { + if t.Type == sampleIndex || t.Type == noInuse { + return i, nil + } + } + + return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) +} + +func sampleTypes(p *Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go new file mode 100644 index 000000000..91f45e53c --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -0,0 +1,315 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. +func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. Returns the remainder of the buffer after all +// attributes. +func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit. 
+ return b, nil + } + + attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2]) + var err error + switch pType + "/" + attribute { + case "heap/format", "cpu/format", "contention/format": + if value != "java" { + return nil, errUnrecognized + } + case "heap/resolution": + p.SampleType = []*ValueType{ + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: value}, + } + case "contention/resolution": + p.SampleType = []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: value}, + } + case "contention/sampling period": + p.PeriodType = &ValueType{ + Type: "contentions", Unit: "count", + } + if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + case "contention/ms since reset": + millis, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + p.DurationNanos = millis * 1000 * 1000 + default: + return nil, errUnrecognized + } + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, nil +} + +// parseJavaSamples parses the samples from a java profile and +// populates the Samples in a profile. Returns the remainder of the +// buffer after the samples. +func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + locs := make(map[uint64]*Location) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + sample := javaSampleRx.FindStringSubmatch(line) + if sample == nil { + // Not a valid sample, exit. + return b, locs, nil + } + + // Java profiles have data/fields inverted compared to other + // profile types. + var err error + value1, value2, value3 := sample[2], sample[1], sample[3] + addrs, err := parseHexAddresses(value3) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + var sloc []*Location + for _, addr := range addrs { + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + s := &Sample{ + Value: make([]int64, 2), + Location: sloc, + } + + if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + + switch pType { + case "heap": + const javaHeapzSamplingRate = 524288 // 512K + if s.Value[0] == 0 { + return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line) + } + s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}} + s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate) + case "contention": + if period := p.Period; period != 0 { + s.Value[0] = s.Value[0] * p.Period + s.Value[1] = s.Value[1] * p.Period + } + } + p.Sample = append(p.Sample, s) + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, locs, nil +} + +// parseJavaLocations parses the location information in a java +// profile and populates the Locations in a profile. It uses the +// location addresses from the profile as both the ID of each +// location. 
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error { + r := bytes.NewBuffer(b) + fns := make(map[string]*Function) + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + return err + } + if line == "" { + break + } + } + + if line = strings.TrimSpace(line); line == "" { + continue + } + + jloc := javaLocationRx.FindStringSubmatch(line) + if len(jloc) != 3 { + continue + } + addr, err := strconv.ParseUint(jloc[1], 16, 64) + if err != nil { + return fmt.Errorf("parsing sample %s: %v", line, err) + } + loc := locs[addr] + if loc == nil { + // Unused/unseen + continue + } + var lineFunc, lineFile string + var lineNo int64 + + if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 { + // Found a line of the form: "function (file:line)" + lineFunc, lineFile = fileLine[1], fileLine[2] + if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 { + lineNo = n + } + } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 { + // If there's not a file:line, it's a shared library path. + // The path isn't interesting, so just give the .so. + lineFunc, lineFile = filePath[1], filepath.Base(filePath[2]) + } else if strings.Contains(jloc[2], "generated stub/JIT") { + lineFunc = "STUB" + } else { + // Treat whole line as the function name. This is used by the + // java agent for internal states such as "GC" or "VM". + lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go new file mode 100644 index 000000000..0c8f3bb5b --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -0,0 +1,1225 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format. 
+ +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg... + logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction. 
+ addr-- + loc := locations[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locations[addr] = loc + p.Location = append(p.Location, loc) + } + locs = append(locs, loc) + } + p.Sample = append(p.Sample, &Sample{ + Location: locs, + Value: []int64{n}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +// remapLocationIDs ensures there is a location for each address +// referenced by a sample, and remaps the samples to point to the new +// location ids. +func (p *Profile) remapLocationIDs() { + seen := make(map[*Location]bool, len(p.Location)) + var locs []*Location + + for _, s := range p.Sample { + for _, l := range s.Location { + if seen[l] { + continue + } + l.ID = uint64(len(locs) + 1) + locs = append(locs, l) + seen[l] = true + } + } + p.Location = locs +} + +func (p *Profile) remapFunctionIDs() { + seen := make(map[*Function]bool, len(p.Function)) + var fns []*Function + + for _, l := range p.Location { + for _, ln := range l.Line { + fn := ln.Function + if fn == nil || seen[fn] { + continue + } + fn.ID = uint64(len(fns) + 1) + fns = append(fns, fn) + seen[fn] = true + } + } + p.Function = fns +} + +// remapMappingIDs matches location addresses with existing mappings +// and updates them appropriately. This is O(N*M), if this ever shows +// up as a bottleneck, evaluate sorting the mappings and doing a +// binary search, which would make it O(N*log(M)). +func (p *Profile) remapMappingIDs() { + // Some profile handlers will incorrectly set regions for the main + // executable if its section is remapped. Fix them through heuristics. + + if len(p.Mapping) > 0 { + // Remove the initial mapping if named '/anon_hugepage' and has a + // consecutive adjacent mapping. + if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { + if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { + p.Mapping = p.Mapping[1:] + } + } + } + + // Subtract the offset from the start of the main mapping if it + // ends up at a recognizable start address. + if len(p.Mapping) > 0 { + const expectedStart = 0x400000 + if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { + m.Start = expectedStart + m.Offset = 0 + } + } + + // Associate each location with an address to the corresponding + // mapping. Create fake mapping if a suitable one isn't found. + var fake *Mapping +nextLocation: + for _, l := range p.Location { + a := l.Address + if l.Mapping != nil || a == 0 { + continue + } + for _, m := range p.Mapping { + if m.Start <= a && a < m.Limit { + l.Mapping = m + continue nextLocation + } + } + // Work around legacy handlers failing to encode the first + // part of mappings split into adjacent ranges. + for _, m := range p.Mapping { + if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { + m.Start -= m.Offset + m.Offset = 0 + l.Mapping = m + continue nextLocation + } + } + // If there is still no mapping, create a fake one. + // This is important for the Go legacy handler, which produced + // no mappings. + if fake == nil { + fake = &Mapping{ + ID: 1, + Limit: ^uint64(0), + } + p.Mapping = append(p.Mapping, fake) + } + l.Mapping = fake + } + + // Reset all mapping IDs. 
+ for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. +func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. 
+ similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// The last stack trace is of the form: +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). +func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. 
+func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. + p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. 
+ addValues := func(countString, sizeString string, label string) error { + count, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + if count == 0 && size != 0 { + return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) + } + if count != 0 { + blocksize = size / count + if sampling == "v2" { + count, size = scaleHeapSample(count, size, rate) + } + } + value = append(value, count, size) + return nil + } + + if includeAlloc { + if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { + return nil, 0, nil, err + } + } + + if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { + return nil, 0, nil, err + } + + addrs, err = parseHexAddresses(sampleData[5]) + if err != nil { + return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, blocksize, addrs, nil +} + +// parseHexAddresses extracts hex numbers from a string, attempts to convert +// each to an unsigned 64-bit number and returns the resulting numbers as a +// slice, or an error if the string contains hex numbers which are too large to +// handle (which means a malformed profile). +func parseHexAddresses(s string) ([]uint64, error) { + hexStrings := hexNumberRE.FindAllString(s, -1) + var addrs []uint64 + for _, s := range hexStrings { + if addr, err := strconv.ParseUint(s, 0, 64); err == nil { + addrs = append(addrs, addr) + } else { + return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) + } + } + return addrs, nil +} + +// scaleHeapSample adjusts the data from a heapz Sample to +// account for its probability of appearing in the collected +// data. heapz profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heapz v2 profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. + return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} + +// parseContention parses a mutex or contention profile. There are 2 cases: +// "--- contentionz " for legacy C++ profiles (and backwards compatibility) +// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. 
+func parseContention(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + + switch l := s.Text(); { + case strings.HasPrefix(l, "--- contentionz "): + case strings.HasPrefix(l, "--- mutex:"): + case strings.HasPrefix(l, "--- contention:"): + default: + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{Type: "contentions", Unit: "count"}, + Period: 1, + SampleType: []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + + var cpuHz int64 + // Parse text of the form "attribute = value" before the samples. + const delimiter = "=" + for s.Scan() { + line := s.Text() + if line = strings.TrimSpace(line); isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + attr := strings.SplitN(line, delimiter, 2) + if len(attr) != 2 { + break + } + key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) + var err error + switch key { + case "cycles/second": + if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "sampling period": + if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "ms since reset": + ms, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return nil, errUnrecognized + } + p.DurationNanos = ms * 1000 * 1000 + case "format": + // CPP contentionz profiles don't have format. + return nil, errUnrecognized + case "resolution": + // CPP contentionz profiles don't have resolution. + return nil, errUnrecognized + case "discarded samples": + default: + return nil, errUnrecognized + } + } + if err := s.Err(); err != nil { + return nil, err + } + + locs := make(map[uint64]*Location) + for { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "---") { + break + } + if !isSpaceOrComment(line) { + value, addrs, err := parseContentionSample(line, p.Period, cpuHz) + if err != nil { + return nil, err + } + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + }) + } + if !s.Scan() { + break + } + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + return p, nil +} + +// parseContentionSample parses a single row from a contention profile +// into a new Sample. +func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { + sampleData := contentionSampleRE.FindStringSubmatch(line) + if sampleData == nil { + return nil, nil, errUnrecognized + } + + v1, err := strconv.ParseInt(sampleData[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + v2, err := strconv.ParseInt(sampleData[2], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + // Unsample values if period and cpuHz are available. + // - Delays are scaled to cycles and then to nanoseconds. + // - Contentions are scaled to cycles. 
+ if period > 0 { + if cpuHz > 0 { + cpuGHz := float64(cpuHz) / 1e9 + v1 = int64(float64(v1) * float64(period) / cpuGHz) + } + v2 = v2 * period + } + + value = []int64{v2, v1} + addrs, err = parseHexAddresses(sampleData[3]) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, addrs, nil +} + +// parseThread parses a Threadz profile and returns a new Profile. +func parseThread(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip past comments and empty lines seeking a real header. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + + line := s.Text() + if m := threadzStartRE.FindStringSubmatch(line); m != nil { + // Advance over initial comments until first stack trace. + for s.Scan() { + if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { + break + } + } + } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + p := &Profile{ + SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, + PeriodType: &ValueType{Type: "thread", Unit: "count"}, + Period: 1, + } + + locs := make(map[uint64]*Location) + // Recognize each thread and populate profile samples. + for !isMemoryMapSentinel(line) { + if strings.HasPrefix(line, "---- no stack trace for") { + line = "" + break + } + if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + var addrs []uint64 + var err error + line, addrs, err = parseThreadSample(s) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + // We got a --same as previous threads--. Bump counters. + if len(p.Sample) > 0 { + s := p.Sample[len(p.Sample)-1] + s.Value[0]++ + } + continue + } + + var sloc []*Location + for i, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call + // (except for the leaf, which is not a call). + if i > 0 { + addr-- + } + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: []int64{1}, + Location: sloc, + }) + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +// parseThreadSample parses a symbolized or unsymbolized stack trace. +// Returns the first line after the traceback, the sample (or nil if +// it hits a 'same-as-previous' marker) and an error. +func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { + var line string + sameAsPrevious := false + for s.Scan() { + line = strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + if strings.HasPrefix(line, "---") { + break + } + if strings.Contains(line, "same as previous thread") { + sameAsPrevious = true + continue + } + + curAddrs, err := parseHexAddresses(line) + if err != nil { + return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + addrs = append(addrs, curAddrs...) + } + if err := s.Err(); err != nil { + return "", nil, err + } + if sameAsPrevious { + return line, nil, nil + } + return line, addrs, nil +} + +// parseAdditionalSections parses any additional sections in the +// profile, ignoring any unrecognized sections. 
+func parseAdditionalSections(s *bufio.Scanner, p *Profile) error { + for !isMemoryMapSentinel(s.Text()) && s.Scan() { + } + if err := s.Err(); err != nil { + return err + } + return p.ParseMemoryMapFromScanner(s) +} + +// ParseProcMaps parses a memory map in the format of /proc/self/maps. +// ParseMemoryMap should be called after setting on a profile to +// associate locations to the corresponding mapping based on their +// address. +func ParseProcMaps(rd io.Reader) ([]*Mapping, error) { + s := bufio.NewScanner(rd) + return parseProcMapsFromScanner(s) +} + +func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) { + var mapping []*Mapping + + var attrs []string + const delimiter = "=" + r := strings.NewReplacer() + for s.Scan() { + line := r.Replace(removeLoggingInfo(s.Text())) + m, err := parseMappingEntry(line) + if err != nil { + if err == errUnrecognized { + // Recognize assignments of the form: attr=value, and replace + // $attr with value on subsequent mappings. + if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 { + attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])) + r = strings.NewReplacer(attrs...) + } + // Ignore any unrecognized entries + continue + } + return nil, err + } + if m == nil { + continue + } + mapping = append(mapping, m) + } + if err := s.Err(); err != nil { + return nil, err + } + return mapping, nil +} + +// removeLoggingInfo detects and removes log prefix entries generated +// by the glog package. If no logging prefix is detected, the string +// is returned unmodified. +func removeLoggingInfo(line string) string { + if match := logInfoRE.FindStringIndex(line); match != nil { + return line[match[1]:] + } + return line +} + +// ParseMemoryMap parses a memory map in the format of +// /proc/self/maps, and overrides the mappings in the current profile. +// It renumbers the samples and locations in the profile correspondingly. +func (p *Profile) ParseMemoryMap(rd io.Reader) error { + return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd)) +} + +// ParseMemoryMapFromScanner parses a memory map in the format of +// /proc/self/maps or a variety of legacy format, and overrides the +// mappings in the current profile. It renumbers the samples and +// locations in the profile correspondingly. +func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error { + mapping, err := parseProcMapsFromScanner(s) + if err != nil { + return err + } + p.Mapping = append(p.Mapping, mapping...) + p.massageMappings() + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + return nil +} + +func parseMappingEntry(l string) (*Mapping, error) { + var start, end, perm, file, offset, buildID string + if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 { + start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5] + } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 { + start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6] + } else { + return nil, errUnrecognized + } + + var err error + mapping := &Mapping{ + File: file, + BuildID: buildID, + } + if perm != "" && !strings.Contains(perm, "x") { + // Skip non-executable entries. 
+ return nil, nil + } + if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { + return nil, errUnrecognized + } + if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { + return nil, errUnrecognized + } + if offset != "" { + if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { + return nil, errUnrecognized + } + } + return mapping, nil +} + +var memoryMapSentinels = []string{ + "--- Memory map: ---", + "MAPPED_LIBRARIES:", +} + +// isMemoryMapSentinel returns true if the string contains one of the +// known sentinels for memory map information. +func isMemoryMapSentinel(line string) bool { + for _, s := range memoryMapSentinels { + if strings.Contains(line, s) { + return true + } + } + return false +} + +func (p *Profile) addLegacyFrameInfo() { + switch { + case isProfileType(p, heapzSampleTypes): + p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr + case isProfileType(p, contentionzSampleTypes): + p.DropFrames, p.KeepFrames = lockRxStr, "" + default: + p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" + } +} + +var heapzSampleTypes = [][]string{ + {"allocations", "size"}, // early Go pprof profiles + {"objects", "space"}, + {"inuse_objects", "inuse_space"}, + {"alloc_objects", "alloc_space"}, + {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles +} +var contentionzSampleTypes = [][]string{ + {"contentions", "delay"}, +} + +func isProfileType(p *Profile, types [][]string) bool { + st := p.SampleType +nextType: + for _, t := range types { + if len(st) != len(t) { + continue + } + + for i := range st { + if st[i].Type != t[i] { + continue nextType + } + } + return true + } + return false +} + +var allocRxStr = strings.Join([]string{ + // POSIX entry points. + `calloc`, + `cfree`, + `malloc`, + `free`, + `memalign`, + `do_memalign`, + `(__)?posix_memalign`, + `pvalloc`, + `valloc`, + `realloc`, + + // TC malloc. + `tcmalloc::.*`, + `tc_calloc`, + `tc_cfree`, + `tc_malloc`, + `tc_free`, + `tc_memalign`, + `tc_posix_memalign`, + `tc_pvalloc`, + `tc_valloc`, + `tc_realloc`, + `tc_new`, + `tc_delete`, + `tc_newarray`, + `tc_deletearray`, + `tc_new_nothrow`, + `tc_newarray_nothrow`, + + // Memory-allocation routines on OS X. + `malloc_zone_malloc`, + `malloc_zone_calloc`, + `malloc_zone_valloc`, + `malloc_zone_realloc`, + `malloc_zone_memalign`, + `malloc_zone_free`, + + // Go runtime + `runtime\..*`, + + // Other misc. memory allocation routines + `BaseArena::.*`, + `(::)?do_malloc_no_errno`, + `(::)?do_malloc_pages`, + `(::)?do_malloc`, + `DoSampledAllocation`, + `MallocedMemBlock::MallocedMemBlock`, + `_M_allocate`, + `__builtin_(vec_)?delete`, + `__builtin_(vec_)?new`, + `__gnu_cxx::new_allocator::allocate`, + `__libc_malloc`, + `__malloc_alloc_template::allocate`, + `allocate`, + `cpp_alloc`, + `operator new(\[\])?`, + `simple_alloc::allocate`, +}, `|`) + +var allocSkipRxStr = strings.Join([]string{ + // Preserve Go runtime frames that appear in the middle/bottom of + // the stack. 
+ `runtime\.panic`, + `runtime\.reflectcall`, + `runtime\.call[0-9]*`, +}, `|`) + +var cpuProfilerRxStr = strings.Join([]string{ + `ProfileData::Add`, + `ProfileData::prof_handler`, + `CpuProfiler::prof_handler`, + `__pthread_sighandler`, + `__restore`, +}, `|`) + +var lockRxStr = strings.Join([]string{ + `RecordLockProfileData`, + `(base::)?RecordLockProfileData.*`, + `(base::)?SubmitMutexProfileData.*`, + `(base::)?SubmitSpinLockProfileData.*`, + `(base::Mutex::)?AwaitCommon.*`, + `(base::Mutex::)?Unlock.*`, + `(base::Mutex::)?UnlockSlow.*`, + `(base::Mutex::)?ReaderUnlock.*`, + `(base::MutexLock::)?~MutexLock.*`, + `(Mutex::)?AwaitCommon.*`, + `(Mutex::)?Unlock.*`, + `(Mutex::)?UnlockSlow.*`, + `(Mutex::)?ReaderUnlock.*`, + `(MutexLock::)?~MutexLock.*`, + `(SpinLock::)?Unlock.*`, + `(SpinLock::)?SlowUnlock.*`, + `(SpinLockHolder::)?~SpinLockHolder.*`, +}, `|`) diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go new file mode 100644 index 000000000..9978e7330 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -0,0 +1,481 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +// Compact performs garbage collection on a profile to remove any +// unreferenced fields. This is useful to reduce the size of a profile +// after samples or locations have been removed. +func (p *Profile) Compact() *Profile { + p, _ = Merge([]*Profile{p}) + return p +} + +// Merge merges all the profiles in profs into a single Profile. +// Returns a new profile independent of the input profiles. The merged +// profile is compacted to eliminate unused samples, locations, +// functions and mappings. Profiles must have identical profile sample +// and period types or the merge will fail. profile.Period of the +// resulting profile will be the maximum of all profiles, and +// profile.TimeNanos will be the earliest nonzero one. Merges are +// associative with the caveat of the first profile having some +// specialization in how headers are combined. There may be other +// subtleties now or in the future regarding associativity. 
+func Merge(srcs []*Profile) (*Profile, error) { + if len(srcs) == 0 { + return nil, fmt.Errorf("no profiles to merge") + } + p, err := combineHeaders(srcs) + if err != nil { + return nil, err + } + + pm := &profileMerger{ + p: p, + samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), + locations: make(map[locationKey]*Location, len(srcs[0].Location)), + functions: make(map[functionKey]*Function, len(srcs[0].Function)), + mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), + } + + for _, src := range srcs { + // Clear the profile-specific hash tables + pm.locationsByID = make(map[uint64]*Location, len(src.Location)) + pm.functionsByID = make(map[uint64]*Function, len(src.Function)) + pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) + + if len(pm.mappings) == 0 && len(src.Mapping) > 0 { + // The Mapping list has the property that the first mapping + // represents the main binary. Take the first Mapping we see, + // otherwise the operations below will add mappings in an + // arbitrary order. + pm.mapMapping(src.Mapping[0]) + } + + for _, s := range src.Sample { + if !isZeroSample(s) { + pm.mapSample(s) + } + } + } + + for _, s := range p.Sample { + if isZeroSample(s) { + // If there are any zero samples, re-merge the profile to GC + // them. + return Merge([]*Profile{p}) + } + } + + return p, nil +} + +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. +func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + +func isZeroSample(s *Sample) bool { + for _, v := range s.Value { + if v != 0 { + return false + } + } + return true +} + +type profileMerger struct { + p *Profile + + // Memoization tables within a profile. + locationsByID map[uint64]*Location + functionsByID map[uint64]*Function + mappingsByID map[uint64]mapInfo + + // Memoization tables for profile entities. + samples map[sampleKey]*Sample + locations map[locationKey]*Location + functions map[functionKey]*Function + mappings map[mappingKey]*Mapping +} + +type mapInfo struct { + m *Mapping + offset int64 +} + +func (pm *profileMerger) mapSample(src *Sample) *Sample { + s := &Sample{ + Location: make([]*Location, len(src.Location)), + Value: make([]int64, len(src.Value)), + Label: make(map[string][]string, len(src.Label)), + NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), + } + for i, l := range src.Location { + s.Location[i] = pm.mapLocation(l) + } + for k, v := range src.Label { + vv := make([]string, len(v)) + copy(vv, v) + s.Label[k] = vv + } + for k, v := range src.NumLabel { + u := src.NumUnit[k] + vv := make([]int64, len(v)) + uu := make([]string, len(u)) + copy(vv, v) + copy(uu, u) + s.NumLabel[k] = vv + s.NumUnit[k] = uu + } + // Check memoization table. 
Must be done on the remapped location to + // account for the remapped mapping. Add current values to the + // existing sample. + k := s.key() + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + copy(s.Value, src.Value) + pm.samples[k] = s + pm.p.Sample = append(pm.p.Sample, s) + return s +} + +// key generates sampleKey to be used as a key for maps. +func (sample *Sample) key() sampleKey { + ids := make([]string, len(sample.Location)) + for i, l := range sample.Location { + ids[i] = strconv.FormatUint(l.ID, 16) + } + + labels := make([]string, 0, len(sample.Label)) + for k, v := range sample.Label { + labels = append(labels, fmt.Sprintf("%q%q", k, v)) + } + sort.Strings(labels) + + numlabels := make([]string, 0, len(sample.NumLabel)) + for k, v := range sample.NumLabel { + numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) + } + sort.Strings(numlabels) + + return sampleKey{ + strings.Join(ids, "|"), + strings.Join(labels, ""), + strings.Join(numlabels, ""), + } +} + +type sampleKey struct { + locations string + labels string + numlabels string +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l, ok := pm.locationsByID[src.ID]; ok { + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID[src.ID] = ll + return ll + } + pm.locationsByID[src.ID] = l + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. +func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*2) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. + mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. 
+func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. + const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. + } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. +func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. +func combineHeaders(srcs []*Profile) (*Profile, error) { + for _, s := range srcs[1:] { + if err := srcs[0].compatible(s); err != nil { + return nil, err + } + } + + var timeNanos, durationNanos, period int64 + var comments []string + seenComments := map[string]bool{} + var defaultSampleType string + for _, s := range srcs { + if timeNanos == 0 || s.TimeNanos < timeNanos { + timeNanos = s.TimeNanos + } + durationNanos += s.DurationNanos + if period == 0 || period < s.Period { + period = s.Period + } + for _, c := range s.Comments { + if seen := seenComments[c]; !seen { + comments = append(comments, c) + seenComments[c] = true + } + } + if defaultSampleType == "" { + defaultSampleType = s.DefaultSampleType + } + } + + p := &Profile{ + SampleType: make([]*ValueType, len(srcs[0].SampleType)), + + DropFrames: srcs[0].DropFrames, + KeepFrames: srcs[0].KeepFrames, + + TimeNanos: timeNanos, + DurationNanos: durationNanos, + PeriodType: srcs[0].PeriodType, + Period: period, + + Comments: comments, + DefaultSampleType: defaultSampleType, + } + copy(p.SampleType, srcs[0].SampleType) + return p, nil +} + +// compatible determines if two profiles can be compared/merged. +// returns nil if the profiles are compatible; otherwise an error with +// details on the incompatibility. 
+func (p *Profile) compatible(pb *Profile) error { + if !equalValueType(p.PeriodType, pb.PeriodType) { + return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) + } + + if len(p.SampleType) != len(pb.SampleType) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + + for i := range p.SampleType { + if !equalValueType(p.SampleType[i], pb.SampleType[i]) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + } + return nil +} + +// equalValueType returns true if the two value types are semantically +// equal. It ignores the internal fields used during encode/decode. +func equalValueType(st1, st2 *ValueType) bool { + return st1.Type == st2.Type && st1.Unit == st2.Unit +} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go new file mode 100644 index 000000000..2590c8ddb --- /dev/null +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -0,0 +1,805 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profile provides a representation of profile.proto and +// methods to encode/decode profiles in this format. +package profile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "math" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Profile is an in-memory representation of profile.proto. +type Profile struct { + SampleType []*ValueType + DefaultSampleType string + Sample []*Sample + Mapping []*Mapping + Location []*Location + Function []*Function + Comments []string + + DropFrames string + KeepFrames string + + TimeNanos int64 + DurationNanos int64 + PeriodType *ValueType + Period int64 + + // The following fields are modified during encoding and copying, + // so are protected by a Mutex. 
+ encodeMu sync.Mutex + + commentX []int64 + dropFramesX int64 + keepFramesX int64 + stringTable []string + defaultSampleTypeX int64 +} + +// ValueType corresponds to Profile.ValueType +type ValueType struct { + Type string // cpu, wall, inuse_space, etc + Unit string // seconds, nanoseconds, bytes, etc + + typeX int64 + unitX int64 +} + +// Sample corresponds to Profile.Sample +type Sample struct { + Location []*Location + Value []int64 + Label map[string][]string + NumLabel map[string][]int64 + NumUnit map[string][]string + + locationIDX []uint64 + labelX []label +} + +// label corresponds to Profile.Label +type label struct { + keyX int64 + // Exactly one of the two following values must be set + strX int64 + numX int64 // Integer value for this label + // can be set if numX has value + unitX int64 +} + +// Mapping corresponds to Profile.Mapping +type Mapping struct { + ID uint64 + Start uint64 + Limit uint64 + Offset uint64 + File string + BuildID string + HasFunctions bool + HasFilenames bool + HasLineNumbers bool + HasInlineFrames bool + + fileX int64 + buildIDX int64 +} + +// Location corresponds to Profile.Location +type Location struct { + ID uint64 + Mapping *Mapping + Address uint64 + Line []Line + IsFolded bool + + mappingIDX uint64 +} + +// Line corresponds to Profile.Line +type Line struct { + Function *Function + Line int64 + + functionIDX uint64 +} + +// Function corresponds to Profile.Function +type Function struct { + ID uint64 + Name string + SystemName string + Filename string + StartLine int64 + + nameX int64 + systemNameX int64 + filenameX int64 +} + +// Parse parses a profile and checks for its validity. The input +// may be a gzip-compressed encoded protobuf or one of many legacy +// profile formats which may be unsupported in the future. +func Parse(r io.Reader) (*Profile, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = ioutil.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. 
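A short usage sketch for the Parse/ParseData entry points above; the file name cpu.pprof is a placeholder assumption:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/google/pprof/profile"
    )

    func main() {
        f, err := os.Open("cpu.pprof") // placeholder path
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // Parse accepts gzip-compressed protobuf, uncompressed protobuf, or one
        // of the legacy text formats, and runs CheckValid on the result.
        p, err := profile.Parse(f)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%d sample(s), %d location(s)\n", len(p.Sample), len(p.Location))
    }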
+func ParseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, errNoData + } + p := &Profile{} + if err := unmarshal(data, p); err != nil { + return nil, err + } + + if err := p.postDecode(); err != nil { + return nil, err + } + + return p, nil +} + +var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) + +// massageMappings applies heuristic-based changes to the profile +// mappings to account for quirks of some environments. +func (p *Profile) massageMappings() { + // Merge adjacent regions with matching names, checking that the offsets match + if len(p.Mapping) > 1 { + mappings := []*Mapping{p.Mapping[0]} + for _, m := range p.Mapping[1:] { + lm := mappings[len(mappings)-1] + if adjacent(lm, m) { + lm.Limit = m.Limit + if m.File != "" { + lm.File = m.File + } + if m.BuildID != "" { + lm.BuildID = m.BuildID + } + p.updateLocationMapping(m, lm) + continue + } + mappings = append(mappings, m) + } + p.Mapping = mappings + } + + // Use heuristics to identify main binary and move it to the top of the list of mappings + for i, m := range p.Mapping { + file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + if len(file) == 0 { + continue + } + if len(libRx.FindStringSubmatch(file)) > 0 { + continue + } + if file[0] == '[' { + continue + } + // Swap what we guess is main to position 0. + p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] + break + } + + // Keep the mapping IDs neatly sorted + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// adjacent returns whether two mapping entries represent the same +// mapping that has been split into two. Check that their addresses are adjacent, +// and if the offsets match, if they are available. +func adjacent(m1, m2 *Mapping) bool { + if m1.File != "" && m2.File != "" { + if m1.File != m2.File { + return false + } + } + if m1.BuildID != "" && m2.BuildID != "" { + if m1.BuildID != m2.BuildID { + return false + } + } + if m1.Limit != m2.Start { + return false + } + if m1.Offset != 0 && m2.Offset != 0 { + offset := m1.Offset + (m1.Limit - m1.Start) + if offset != m2.Offset { + return false + } + } + return true +} + +func (p *Profile) updateLocationMapping(from, to *Mapping) { + for _, l := range p.Location { + if l.Mapping == from { + l.Mapping = to + } + } +} + +func serialize(p *Profile) []byte { + p.encodeMu.Lock() + p.preEncode() + b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. +func (p *Profile) Write(w io.Writer) error { + zw := gzip.NewWriter(w) + defer zw.Close() + _, err := zw.Write(serialize(p)) + return err +} + +// WriteUncompressed writes the profile as a marshaled protobuf. +func (p *Profile) WriteUncompressed(w io.Writer) error { + _, err := w.Write(serialize(p)) + return err +} + +// CheckValid tests whether the profile is valid. Checks include, but are +// not limited to: +// - len(Profile.Sample[n].value) == len(Profile.value_unit) +// - Sample.id has a corresponding Profile.Location +func (p *Profile) CheckValid() error { + // Check that sample values are consistent + sampleLen := len(p.SampleType) + if sampleLen == 0 && len(p.Sample) != 0 { + return fmt.Errorf("missing sample type information") + } + for _, s := range p.Sample { + if s == nil { + return fmt.Errorf("profile has nil sample") + } + if len(s.Value) != sampleLen { + return fmt.Errorf("mismatch: sample has %d values vs. 
%d types", len(s.Value), len(p.SampleType)) + } + for _, l := range s.Location { + if l == nil { + return fmt.Errorf("sample has nil location") + } + } + } + + // Check that all mappings/locations/functions are in the tables + // Check that there are no duplicate ids + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + for _, m := range p.Mapping { + if m == nil { + return fmt.Errorf("profile has nil mapping") + } + if m.ID == 0 { + return fmt.Errorf("found mapping with reserved ID=0") + } + if mappings[m.ID] != nil { + return fmt.Errorf("multiple mappings with same id: %d", m.ID) + } + mappings[m.ID] = m + } + functions := make(map[uint64]*Function, len(p.Function)) + for _, f := range p.Function { + if f == nil { + return fmt.Errorf("profile has nil function") + } + if f.ID == 0 { + return fmt.Errorf("found function with reserved ID=0") + } + if functions[f.ID] != nil { + return fmt.Errorf("multiple functions with same id: %d", f.ID) + } + functions[f.ID] = f + } + locations := make(map[uint64]*Location, len(p.Location)) + for _, l := range p.Location { + if l == nil { + return fmt.Errorf("profile has nil location") + } + if l.ID == 0 { + return fmt.Errorf("found location with reserved id=0") + } + if locations[l.ID] != nil { + return fmt.Errorf("multiple locations with same id: %d", l.ID) + } + locations[l.ID] = l + if m := l.Mapping; m != nil { + if m.ID == 0 || mappings[m.ID] != m { + return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) + } + } + for _, ln := range l.Line { + f := ln.Function + if f == nil { + return fmt.Errorf("location id: %d has a line with nil function", l.ID) + } + if f.ID == 0 || functions[f.ID] != f { + return fmt.Errorf("inconsistent function %p: %d", f, f.ID) + } + } + } + return nil +} + +// Aggregate merges the locations in the profile into equivalence +// classes preserving the request attributes. It also updates the +// samples to point to the merged locations. +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { + for _, m := range p.Mapping { + m.HasInlineFrames = m.HasInlineFrames && inlineFrame + m.HasFunctions = m.HasFunctions && function + m.HasFilenames = m.HasFilenames && filename + m.HasLineNumbers = m.HasLineNumbers && linenumber + } + + // Aggregate functions + if !function || !filename { + for _, f := range p.Function { + if !function { + f.Name = "" + f.SystemName = "" + } + if !filename { + f.Filename = "" + } + } + } + + // Aggregate locations + if !inlineFrame || !address || !linenumber { + for _, l := range p.Location { + if !inlineFrame && len(l.Line) > 1 { + l.Line = l.Line[len(l.Line)-1:] + } + if !linenumber { + for i := range l.Line { + l.Line[i].Line = 0 + } + } + if !address { + l.Address = 0 + } + } + } + + return p.CheckValid() +} + +// NumLabelUnits returns a map of numeric label keys to the units +// associated with those keys and a map of those keys to any units +// that were encountered but not used. +// Unit for a given key is the first encountered unit for that key. If multiple +// units are encountered for values paired with a particular key, then the first +// unit encountered is used and all other units are returned in sorted order +// in map of ignored units. +// If no units are encountered for a particular key, the unit is then inferred +// based on the key. 
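To make the unit-resolution rules above concrete, a hedged sketch of what the method defined next reports for samples carrying a unit-less "request" label and a "latency" label with conflicting units; the key names are chosen for illustration and the skeletal Profile is enough for this method:

    package main

    import (
        "fmt"

        "github.com/google/pprof/profile"
    )

    func main() {
        p := &profile.Profile{
            Sample: []*profile.Sample{{
                NumLabel: map[string][]int64{"request": {4096}, "latency": {7}},
                NumUnit:  map[string][]string{"latency": {"ms"}},
            }, {
                NumLabel: map[string][]int64{"latency": {9}},
                NumUnit:  map[string][]string{"latency": {"ns"}},
            }},
        }
        units, ignored := p.NumLabelUnits()
        fmt.Println(units["request"])   // "bytes": inferred from the key name
        fmt.Println(units["latency"])   // "ms": the first unit encountered wins
        fmt.Println(ignored["latency"]) // ["ns"]: encountered but not used
    }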
+func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. + for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + +// String dumps a text representation of a profile. Intended mainly +// for debugging purposes. +func (p *Profile) String() string { + ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) + for _, c := range p.Comments { + ss = append(ss, "Comment: "+c) + } + if pt := p.PeriodType; pt != nil { + ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) + } + ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) + if p.TimeNanos != 0 { + ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) + } + if p.DurationNanos != 0 { + ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) + } + + ss = append(ss, "Samples:") + var sh1 string + for _, s := range p.SampleType { + dflt := "" + if s.Type == p.DefaultSampleType { + dflt = "[dflt]" + } + sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) + } + ss = append(ss, strings.TrimSpace(sh1)) + for _, s := range p.Sample { + ss = append(ss, s.string()) + } + + ss = append(ss, "Locations") + for _, l := range p.Location { + ss = append(ss, l.string()) + } + + ss = append(ss, "Mappings") + for _, m := range p.Mapping { + ss = append(ss, m.string()) + } + + return strings.Join(ss, "\n") + "\n" +} + +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. +func (l *Location) string() string { + ss := []string{} + locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) + if m := l.Mapping; m != nil { + locStr = locStr + fmt.Sprintf("M=%d ", m.ID) + } + if l.IsFolded { + locStr = locStr + "[F] " + } + if len(l.Line) == 0 { + ss = append(ss, locStr) + } + for li := range l.Line { + lnStr := "??" 
+ if fn := l.Line[li].Function; fn != nil { + lnStr = fmt.Sprintf("%s %s:%d s=%d", + fn.Name, + fn.Filename, + l.Line[li].Line, + fn.StartLine) + if fn.Name != fn.SystemName { + lnStr = lnStr + "(" + fn.SystemName + ")" + } + } + ss = append(ss, locStr+lnStr) + // Do not print location details past the first line + locStr = " " + } + return strings.Join(ss, "\n") +} + +// string dumps a text representation of a sample. Intended mainly +// for debugging purposes. +func (s *Sample) string() string { + ss := []string{} + var sv string + for _, v := range s.Value { + sv = fmt.Sprintf("%s %10d", sv, v) + } + sv = sv + ": " + for _, l := range s.Location { + sv = sv + fmt.Sprintf("%d ", l.ID) + } + ss = append(ss, sv) + const labelHeader = " " + if len(s.Label) > 0 { + ss = append(ss, labelHeader+labelsToString(s.Label)) + } + if len(s.NumLabel) > 0 { + ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) + } + return strings.Join(ss, "\n") +} + +// labelsToString returns a string representation of a +// map representing labels. +func labelsToString(labels map[string][]string) string { + ls := []string{} + for k, v := range labels { + ls = append(ls, fmt.Sprintf("%s:%v", k, v)) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// numLabelsToString returns a string representation of a map +// representing numeric labels. +func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// SetLabel sets the specified key to the specified value for all samples in the +// profile. +func (p *Profile) SetLabel(key string, value []string) { + for _, sample := range p.Sample { + if sample.Label == nil { + sample.Label = map[string][]string{key: value} + } else { + sample.Label[key] = value + } + } +} + +// RemoveLabel removes all labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveLabel(key string) { + for _, sample := range p.Sample { + delete(sample.Label, key) + } +} + +// HasLabel returns true if a sample has a label with indicated key and value. +func (s *Sample) HasLabel(key, value string) bool { + for _, v := range s.Label[key] { + if v == value { + return true + } + } + return false +} + +// DiffBaseSample returns true if a sample belongs to the diff base and false +// otherwise. +func (s *Sample) DiffBaseSample() bool { + return s.HasLabel("pprof::base", "true") +} + +// Scale multiplies all sample values in a profile by a constant and keeps +// only samples that have at least one non-zero value. +func (p *Profile) Scale(ratio float64) { + if ratio == 1 { + return + } + ratios := make([]float64, len(p.SampleType)) + for i := range p.SampleType { + ratios[i] = ratio + } + p.ScaleN(ratios) +} + +// ScaleN multiplies each sample values in a sample by a different amount +// and keeps only samples that have at least one non-zero value. 
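A brief sketch of the scaling behaviour described above and implemented by ScaleN next; the single cpu/nanoseconds sample type and the 0.4 ratio are illustrative:

    package main

    import (
        "fmt"

        "github.com/google/pprof/profile"
    )

    func main() {
        p := &profile.Profile{
            SampleType: []*profile.ValueType{{Type: "cpu", Unit: "nanoseconds"}},
            Sample: []*profile.Sample{
                {Value: []int64{100}},
                {Value: []int64{1}},
            },
        }
        // Scale multiplies every value and drops samples that become all-zero:
        // 100 -> 40, 1 -> 0 after rounding, so the second sample is removed.
        p.Scale(0.4)
        fmt.Println(len(p.Sample), p.Sample[0].Value) // 1 [40]
    }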
+func (p *Profile) ScaleN(ratios []float64) error { + if len(p.SampleType) != len(ratios) { + return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) + } + allOnes := true + for _, r := range ratios { + if r != 1 { + allOnes = false + break + } + } + if allOnes { + return nil + } + fillIdx := 0 + for _, s := range p.Sample { + keepSample := false + for i, v := range s.Value { + if ratios[i] != 1 { + val := int64(math.Round(float64(v) * ratios[i])) + s.Value[i] = val + keepSample = keepSample || val != 0 + } + } + if keepSample { + p.Sample[fillIdx] = s + fillIdx++ + } + } + p.Sample = p.Sample[:fillIdx] + return nil +} + +// HasFunctions determines if all locations in this profile have +// symbolized function information. +func (p *Profile) HasFunctions() bool { + for _, l := range p.Location { + if l.Mapping != nil && !l.Mapping.HasFunctions { + return false + } + } + return true +} + +// HasFileLines determines if all locations in this profile have +// symbolized file and line number information. +func (p *Profile) HasFileLines() bool { + for _, l := range p.Location { + if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { + return false + } + } + return true +} + +// Unsymbolizable returns true if a mapping points to a binary for which +// locations can't be symbolized in principle, at least now. Examples are +// "[vdso]", [vsyscall]" and some others, see the code. +func (m *Mapping) Unsymbolizable() bool { + name := filepath.Base(m.File) + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") +} + +// Copy makes a fully independent copy of a profile. +func (p *Profile) Copy() *Profile { + pp := &Profile{} + if err := unmarshal(serialize(p), pp); err != nil { + panic(err) + } + if err := pp.postDecode(); err != nil { + panic(err) + } + + return pp +} diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go new file mode 100644 index 000000000..539ad3ab3 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -0,0 +1,370 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is a simple protocol buffer encoder and decoder. +// The format is described at +// https://developers.google.com/protocol-buffers/docs/encoding +// +// A protocol message must implement the message interface: +// decoder() []decoder +// encode(*buffer) +// +// The decode method returns a slice indexed by field number that gives the +// function to decode that field. +// The encode method encodes its receiver into the given buffer. +// +// The two methods are simple enough to be implemented by hand rather than +// by using a protocol compiler. +// +// See profile.go for examples of messages implementing this interface. +// +// There is no support for groups, message sets, or "has" bits. 
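Before the implementation, a standalone sketch of the base-128 varint layout the encoder below relies on; the helper name putVarint and the example tag/value pair are illustrative, not part of this package:

    package main

    import "fmt"

    // putVarint mirrors the varint loop used below: emit 7 bits per byte,
    // setting the high bit while more bytes follow.
    func putVarint(dst []byte, x uint64) []byte {
        for x >= 128 {
            dst = append(dst, byte(x)|0x80)
            x >>= 7
        }
        return append(dst, byte(x))
    }

    func main() {
        // Field tag 1, wire type 0 (varint), value 300:
        // key = 1<<3 | 0 = 0x08, value 300 encodes as 0xac 0x02.
        b := putVarint(nil, 1<<3|0)
        b = putVarint(b, 300)
        fmt.Printf("% x\n", b) // prints: 08 ac 02
    }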
+ +package profile + +import ( + "errors" + "fmt" +) + +type buffer struct { + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte +} + +type decoder func(*buffer, message) error + +type message interface { + decoder() []decoder + encode(*buffer) +} + +func marshal(m message) []byte { + var b buffer + m.encode(&b) + return b.data +} + +func encodeVarint(b *buffer, x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func encodeLength(b *buffer, tag int, len int) { + encodeVarint(b, uint64(tag)<<3|2) + encodeVarint(b, uint64(len)) +} + +func encodeUint64(b *buffer, tag int, x uint64) { + // append varint to b.data + encodeVarint(b, uint64(tag)<<3) + encodeVarint(b, x) +} + +func encodeUint64s(b *buffer, tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, u) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeUint64(b, tag, u) + } +} + +func encodeUint64Opt(b *buffer, tag int, x uint64) { + if x == 0 { + return + } + encodeUint64(b, tag, x) +} + +func encodeInt64(b *buffer, tag int, x int64) { + u := uint64(x) + encodeUint64(b, tag, u) +} + +func encodeInt64s(b *buffer, tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, uint64(u)) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeInt64(b, tag, u) + } +} + +func encodeInt64Opt(b *buffer, tag int, x int64) { + if x == 0 { + return + } + encodeInt64(b, tag, x) +} + +func encodeString(b *buffer, tag int, x string) { + encodeLength(b, tag, len(x)) + b.data = append(b.data, x...) 
+} + +func encodeStrings(b *buffer, tag int, x []string) { + for _, s := range x { + encodeString(b, tag, s) + } +} + +func encodeBool(b *buffer, tag int, x bool) { + if x { + encodeUint64(b, tag, 1) + } else { + encodeUint64(b, tag, 0) + } +} + +func encodeBoolOpt(b *buffer, tag int, x bool) { + if x { + encodeBool(b, tag, x) + } +} + +func encodeMessage(b *buffer, tag int, m message) { + n1 := len(b.data) + m.encode(b) + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) +} + +func unmarshal(data []byte, m message) (err error) { + b := buffer{data: data, typ: 2} + return decodeMessage(&b, m) +} + +func le64(p []byte) uint64 { + return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 +} + +func le32(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func decodeVarint(data []byte) (uint64, []byte, error) { + var u uint64 + for i := 0; ; i++ { + if i >= 10 || i >= len(data) { + return 0, nil, errors.New("bad varint") + } + u |= uint64(data[i]&0x7F) << uint(7*i) + if data[i]&0x80 == 0 { + return u, data[i+1:], nil + } + } +} + +func decodeField(b *buffer, data []byte) ([]byte, error) { + x, data, err := decodeVarint(data) + if err != nil { + return nil, err + } + b.field = int(x >> 3) + b.typ = int(x & 7) + b.data = nil + b.u64 = 0 + switch b.typ { + case 0: + b.u64, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + case 1: + if len(data) < 8 { + return nil, errors.New("not enough data") + } + b.u64 = le64(data[:8]) + data = data[8:] + case 2: + var n uint64 + n, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + if n > uint64(len(data)) { + return nil, errors.New("too much data") + } + b.data = data[:n] + data = data[n:] + case 5: + if len(data) < 4 { + return nil, errors.New("not enough data") + } + b.u64 = uint64(le32(data[:4])) + data = data[4:] + default: + return nil, fmt.Errorf("unknown wire type: %d", b.typ) + } + + return data, nil +} + +func checkType(b *buffer, typ int) error { + if b.typ != typ { + return errors.New("type mismatch") + } + return nil +} + +func decodeMessage(b *buffer, m message) error { + if err := checkType(b, 2); err != nil { + return err + } + dec := m.decoder() + data := b.data + for len(data) > 0 { + // pull varint field# + type + var err error + data, err = decodeField(b, data) + if err != nil { + return err + } + if b.field >= len(dec) || dec[b.field] == nil { + continue + } + if err := dec[b.field](b, m); err != nil { + return err + } + } + return nil +} + +func decodeInt64(b *buffer, x *int64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = int64(b.u64) + return nil +} + +func decodeInt64s(b *buffer, x *[]int64) error { + if b.typ == 2 { + // Packed encoding + data := b.data + tmp := make([]int64, 0, len(data)) // Maximally sized + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + tmp = append(tmp, int64(u)) + } + *x = append(*x, tmp...) 
+ return nil + } + var i int64 + if err := decodeInt64(b, &i); err != nil { + return err + } + *x = append(*x, i) + return nil +} + +func decodeUint64(b *buffer, x *uint64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = b.u64 + return nil +} + +func decodeUint64s(b *buffer, x *[]uint64) error { + if b.typ == 2 { + data := b.data + // Packed encoding + tmp := make([]uint64, 0, len(data)) // Maximally sized + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + tmp = append(tmp, u) + } + *x = append(*x, tmp...) + return nil + } + var u uint64 + if err := decodeUint64(b, &u); err != nil { + return err + } + *x = append(*x, u) + return nil +} + +func decodeString(b *buffer, x *string) error { + if err := checkType(b, 2); err != nil { + return err + } + *x = string(b.data) + return nil +} + +func decodeStrings(b *buffer, x *[]string) error { + var s string + if err := decodeString(b, &s); err != nil { + return err + } + *x = append(*x, s) + return nil +} + +func decodeBool(b *buffer, x *bool) error { + if err := checkType(b, 0); err != nil { + return err + } + if int64(b.u64) == 0 { + *x = false + } else { + *x = true + } + return nil +} diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go new file mode 100644 index 000000000..02d21a818 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -0,0 +1,178 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implements methods to remove frames from profiles. + +package profile + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + reservedNames = []string{"(anonymous namespace)", "operator()"} + bracketRx = func() *regexp.Regexp { + var quotedNames []string + for _, name := range append(reservedNames, "(") { + quotedNames = append(quotedNames, regexp.QuoteMeta(name)) + } + return regexp.MustCompile(strings.Join(quotedNames, "|")) + }() +) + +// simplifyFunc does some primitive simplification of function names. +func simplifyFunc(f string) string { + // Account for leading '.' on the PPC ELF v1 ABI. + funcName := strings.TrimPrefix(f, ".") + // Account for unsimplified names -- try to remove the argument list by trimming + // starting from the first '(', but skipping reserved names that have '('. + for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { + foundReserved := false + for _, res := range reservedNames { + if funcName[ind[0]:ind[1]] == res { + foundReserved = true + break + } + } + if !foundReserved { + funcName = funcName[:ind[0]] + break + } + } + return funcName +} + +// Prune removes all nodes beneath a node matching dropRx, and not +// matching keepRx. If the root node of a Sample matches, the sample +// will have an empty stack. 
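A usage sketch for the pruning entry point defined next; the function names and the drop pattern are illustrative, and RemoveUninteresting below shows the anchored patterns the package itself builds from DropFrames/KeepFrames:

    package main

    import (
        "fmt"
        "regexp"

        "github.com/google/pprof/profile"
    )

    func main() {
        rt := &profile.Function{ID: 1, Name: "runtime.mallocgc"}
        app := &profile.Function{ID: 2, Name: "main.work"}
        locRT := &profile.Location{ID: 1, Line: []profile.Line{{Function: rt}}}
        locApp := &profile.Location{ID: 2, Line: []profile.Line{{Function: app}}}

        p := &profile.Profile{
            SampleType: []*profile.ValueType{{Type: "cpu", Unit: "nanoseconds"}},
            Function:   []*profile.Function{rt, app},
            Location:   []*profile.Location{locRT, locApp},
            Sample: []*profile.Sample{{
                // Leaf to root: runtime.mallocgc called from main.work.
                Location: []*profile.Location{locRT, locApp},
                Value:    []int64{1},
            }},
        }

        // The runtime frame matches dropRx, so it and everything beneath it
        // are removed from the sample's stack.
        p.Prune(regexp.MustCompile(`^runtime\..*$`), nil)
        fmt.Println(len(p.Sample[0].Location)) // 1: only main.work remains
    }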
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { + prune := make(map[uint64]bool) + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + var i int + for i = len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + break + } + } + } + } + + if i >= 0 { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + + // Remove the matching location. + if i == len(loc.Line)-1 { + // Matched the top entry: prune the whole location. + prune[loc.ID] = true + } else { + loc.Line = loc.Line[i+1:] + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the root to the leaves to find the prune location. + // Do not prune frames before the first user frame, to avoid + // pruning everything. + foundUser := false + for i := len(sample.Location) - 1; i >= 0; i-- { + id := sample.Location[i].ID + if !prune[id] && !pruneBeneath[id] { + foundUser = true + continue + } + if !foundUser { + continue + } + if prune[id] { + sample.Location = sample.Location[i+1:] + break + } + if pruneBeneath[id] { + sample.Location = sample.Location[i:] + break + } + } + } +} + +// RemoveUninteresting prunes and elides profiles using built-in +// tables of uninteresting function names. +func (p *Profile) RemoveUninteresting() error { + var keep, drop *regexp.Regexp + var err error + + if p.DropFrames != "" { + if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) + } + if p.KeepFrames != "" { + if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) + } + } + p.Prune(drop, keep) + } + return nil +} + +// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. +// +// Please see the example below to understand this method as well as +// the difference from Prune method. +// +// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. +// +// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. +// Prune(A, nil) returns [B,C,B,D] by removing A itself. +// +// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. +// Prune(B, nil) returns [D] because a matching node is found by scanning from the root. +func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. 
+ for i, loc := range sample.Location { + if pruneBeneath[loc.ID] { + sample.Location = sample.Location[i:] + break + } + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/LICENSE b/vendor/github.com/onsi/ginkgo/v2/LICENSE new file mode 100644 index 000000000..9415ee72c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go new file mode 100644 index 000000000..a61021d08 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go @@ -0,0 +1,69 @@ +package config + +// GinkgoConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type GinkgoConfigType = DeprecatedGinkgoConfigType +type DeprecatedGinkgoConfigType struct { + RandomSeed int64 + RandomizeAllSpecs bool + RegexScansFilePath bool + FocusStrings []string + SkipStrings []string + SkipMeasurements bool + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + DebugParallel bool + + ParallelNode int + ParallelTotal int + SyncHost string + StreamHost string +} + +// DefaultReporterConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType +type DeprecatedDefaultReporterConfigType struct { + NoColor bool + SlowSpecThreshold float64 + NoisyPendings bool + NoisySkippings bool + Succinct bool + Verbose bool + FullTrace bool + ReportPassed bool + ReportFile string +} + +// Sadly there is no way to gracefully deprecate access to these global config variables. 
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go new file mode 100644 index 000000000..778bfd7c7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go @@ -0,0 +1,41 @@ +// +build !windows + +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+*/ + +package formatter + +import ( + "io" + "os" +) + +func newColorable(file *os.File) io.Writer { + return file +} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go new file mode 100644 index 000000000..dd1d143cc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go @@ -0,0 +1,809 @@ +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package formatter + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +func isTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word +} + +func newColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := 
syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &writer{out: file, handle: handle, oldattr: csbi.attributes} + } else { + return file + } +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 
201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +func (w *writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewBuffer(data) +loop: + for { + r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + var m rune + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + var csbi consoleScreenBufferInfo + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n, err = strconv.Atoi(buf.String()); err == nil { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + } + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += 
short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H': + token := strings.Split(buf.String(), ";") + if len(token) != 2 { + continue + } + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2) + csbi.cursorPosition.x = short(n1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i += 1 { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr = (attr & backgroundMask) + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 
0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, 
b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go new file mode 100644 index 000000000..43b16211d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -0,0 +1,195 @@ +package formatter + +import ( + "fmt" + "os" + "regexp" + "strings" +) + +// ColorableStdOut and ColorableStdErr enable color output support on Windows +var ColorableStdOut = newColorable(os.Stdout) +var ColorableStdErr = newColorable(os.Stderr) + +const COLS = 80 + +type ColorMode uint8 + +const ( + ColorModeNone ColorMode = iota + ColorModeTerminal + ColorModePassthrough +) + +var SingletonFormatter = New(ColorModeTerminal) + +func F(format string, args ...interface{}) string { + return SingletonFormatter.F(format, args...) +} + +func Fi(indentation uint, format string, args ...interface{}) string { + return SingletonFormatter.Fi(indentation, format, args...) +} + +func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { + return SingletonFormatter.Fiw(indentation, maxWidth, format, args...) 
+} + +type Formatter struct { + ColorMode ColorMode + colors map[string]string + styleRe *regexp.Regexp + preserveColorStylingTags bool +} + +func NewWithNoColorBool(noColor bool) Formatter { + if noColor { + return New(ColorModeNone) + } + return New(ColorModeTerminal) +} + +func New(colorMode ColorMode) Formatter { + f := Formatter{ + ColorMode: colorMode, + colors: map[string]string{ + "/": "\x1b[0m", + "bold": "\x1b[1m", + "underline": "\x1b[4m", + + "red": "\x1b[38;5;9m", + "orange": "\x1b[38;5;214m", + "coral": "\x1b[38;5;204m", + "magenta": "\x1b[38;5;13m", + "green": "\x1b[38;5;10m", + "dark-green": "\x1b[38;5;28m", + "yellow": "\x1b[38;5;11m", + "light-yellow": "\x1b[38;5;228m", + "cyan": "\x1b[38;5;14m", + "gray": "\x1b[38;5;243m", + "light-gray": "\x1b[38;5;246m", + "blue": "\x1b[38;5;12m", + }, + } + colors := []string{} + for color := range f.colors { + colors = append(colors, color) + } + f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}") + return f +} + +func (f Formatter) F(format string, args ...interface{}) string { + return f.Fi(0, format, args...) +} + +func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string { + return f.Fiw(indentation, 0, format, args...) +} + +func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { + out := fmt.Sprintf(f.style(format), args...) + + if indentation == 0 && maxWidth == 0 { + return out + } + + lines := strings.Split(out, "\n") + + if maxWidth != 0 { + outLines := []string{} + + maxWidth = maxWidth - indentation*2 + for _, line := range lines { + if f.length(line) <= maxWidth { + outLines = append(outLines, line) + continue + } + words := strings.Split(line, " ") + outWords := []string{words[0]} + length := uint(f.length(words[0])) + for _, word := range words[1:] { + wordLength := f.length(word) + if length+wordLength+1 <= maxWidth { + length += wordLength + 1 + outWords = append(outWords, word) + continue + } + outLines = append(outLines, strings.Join(outWords, " ")) + outWords = []string{word} + length = wordLength + } + if len(outWords) > 0 { + outLines = append(outLines, strings.Join(outWords, " ")) + } + } + + lines = outLines + } + + if indentation == 0 { + return strings.Join(lines, "\n") + } + + padding := strings.Repeat(" ", int(indentation)) + for i := range lines { + if lines[i] != "" { + lines[i] = padding + lines[i] + } + } + + return strings.Join(lines, "\n") +} + +func (f Formatter) length(styled string) uint { + n := uint(0) + inStyle := false + for _, b := range styled { + if inStyle { + if b == 'm' { + inStyle = false + } + continue + } + if b == '\x1b' { + inStyle = true + continue + } + n += 1 + } + return n +} + +func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string { + if len(elements) == 0 { + return "" + } + n := len(cycle) + out := "" + for i, text := range elements { + out += cycle[i%n] + text + if i < len(elements)-1 { + out += joiner + } + } + out += "{{/}}" + return f.style(out) +} + +func (f Formatter) style(s string) string { + switch f.ColorMode { + case ColorModeNone: + return f.styleRe.ReplaceAllString(s, "") + case ColorModePassthrough: + return s + case ColorModeTerminal: + return f.styleRe.ReplaceAllStringFunc(s, func(match string) string { + if out, ok := f.colors[strings.Trim(match, "{}")]; ok { + return out + } + return match + }) + } + + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go 
b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go new file mode 100644 index 000000000..2efd28608 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go @@ -0,0 +1,61 @@ +package command + +import "fmt" + +type AbortDetails struct { + ExitCode int + Error error + EmitUsage bool +} + +func Abort(details AbortDetails) { + panic(details) +} + +func AbortGracefullyWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 0, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWithUsage(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: true, + }) +} + +func AbortIfError(preamble string, err error) { + if err != nil { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, err.Error()), + EmitUsage: false, + }) + } +} + +func AbortIfErrors(preamble string, errors []error) { + if len(errors) > 0 { + out := "" + for _, err := range errors { + out += err.Error() + } + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, out), + EmitUsage: false, + }) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go new file mode 100644 index 000000000..12e0e5659 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go @@ -0,0 +1,50 @@ +package command + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Command struct { + Name string + Flags types.GinkgoFlagSet + Usage string + ShortDoc string + Documentation string + DocLink string + Command func(args []string, additionalArgs []string) +} + +func (c Command) Run(args []string, additionalArgs []string) { + args, err := c.Flags.Parse(args) + if err != nil { + AbortWithUsage(err.Error()) + } + + c.Command(args, additionalArgs) +} + +func (c Command) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}")) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage)))) + if c.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc)) + fmt.Fprintln(writer, "") + } + if c.Documentation != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation)) + fmt.Fprintln(writer, "") + } + if c.DocLink != "" { + fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink)) + fmt.Fprintln(writer, "") + } + flagUsage := c.Flags.Usage() + if flagUsage != "" { + fmt.Fprintf(writer, formatter.F(flagUsage)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go new file mode 100644 index 000000000..88dd8d6b0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go @@ -0,0 +1,182 @@ +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Program struct { + Name string + Heading string + Commands []Command + DefaultCommand Command + DeprecatedCommands []DeprecatedCommand + + //For testing - leave as nil in production + OutWriter io.Writer + ErrWriter io.Writer + Exiter 
func(code int) +} + +type DeprecatedCommand struct { + Name string + Deprecation types.Deprecation +} + +func (p Program) RunAndExit(osArgs []string) { + var command Command + deprecationTracker := types.NewDeprecationTracker() + if p.Exiter == nil { + p.Exiter = os.Exit + } + if p.OutWriter == nil { + p.OutWriter = formatter.ColorableStdOut + } + if p.ErrWriter == nil { + p.ErrWriter = formatter.ColorableStdErr + } + + defer func() { + exitCode := 0 + + if r := recover(); r != nil { + details, ok := r.(AbortDetails) + if !ok { + panic(r) + } + + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name)) + fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error())) + } + if details.EmitUsage { + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, "") + } + command.EmitUsage(p.ErrWriter) + } + exitCode = details.ExitCode + } + + command.Flags.ValidateDeprecations(deprecationTracker) + if deprecationTracker.DidTrackDeprecations() { + fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport()) + } + p.Exiter(exitCode) + return + }() + + args, additionalArgs := []string{}, []string{} + + foundDelimiter := false + for _, arg := range osArgs[1:] { + if !foundDelimiter { + if arg == "--" { + foundDelimiter = true + continue + } + } + + if foundDelimiter { + additionalArgs = append(additionalArgs, arg) + } else { + args = append(args, arg) + } + } + + command = p.DefaultCommand + if len(args) > 0 { + p.handleHelpRequestsAndExit(p.OutWriter, args) + if command.Name == args[0] { + args = args[1:] + } else { + for _, deprecatedCommand := range p.DeprecatedCommands { + if deprecatedCommand.Name == args[0] { + deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation) + return + } + } + for _, tryCommand := range p.Commands { + if tryCommand.Name == args[0] { + command, args = tryCommand, args[1:] + break + } + } + } + } + + command.Run(args, additionalArgs) +} + +func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) { + if len(args) == 0 { + return + } + + matchesHelpFlag := func(args ...string) bool { + for _, arg := range args { + if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" { + return true + } + } + return false + } + if len(args) == 1 { + if args[0] == "help" || matchesHelpFlag(args[0]) { + p.EmitUsage(writer) + Abort(AbortDetails{}) + } + } else { + var name string + if args[0] == "help" || matchesHelpFlag(args[0]) { + name = args[1] + } else if matchesHelpFlag(args[1:]...) 
{ + name = args[0] + } else { + return + } + + if p.DefaultCommand.Name == name || p.Name == name { + p.DefaultCommand.EmitUsage(writer) + Abort(AbortDetails{}) + } + for _, command := range p.Commands { + if command.Name == name { + command.EmitUsage(writer) + Abort(AbortDetails{}) + } + } + + fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name)) + fmt.Fprintln(writer, "") + p.EmitUsage(writer) + Abort(AbortDetails{ExitCode: 1}) + } + return +} + +func (p Program) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F(p.Heading)) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading)))) + fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name)) + fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name)) + fmt.Fprintln(writer, "") + fmt.Fprintln(writer, formatter.F("The following commands are available:")) + + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage)) + if p.DefaultCommand.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc)) + } + + for _, command := range p.Commands { + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage)) + if command.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc)) + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go new file mode 100644 index 000000000..a367a1fc9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go @@ -0,0 +1,48 @@ +package generators + +var bootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} +` + +var agoutiBootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} + +var agoutiDriver *agouti.WebDriver + +var _ = {{.GinkgoPackage}}BeforeSuite(func() { + // Choose a WebDriver: + + agoutiDriver = agouti.PhantomJS() + // agoutiDriver = agouti.Selenium() + // agoutiDriver = agouti.ChromeDriver() + + {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed()) +}) + +var _ = {{.GinkgoPackage}}AfterSuite(func() { + {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed()) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go new file mode 100644 index 000000000..0273abe9c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -0,0 +1,113 @@ +package generators + +import ( + "bytes" + "fmt" + "os" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + 
"github.com/onsi/ginkgo/v2/types" +) + +func BuildBootstrapCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "bootstrap", + Usage: "ginkgo bootstrap", + ShortDoc: "Bootstrap a test suite for the current package", + Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure. + +{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`, + DocLink: "generators", + Flags: flags, + Command: func(_ []string, _ []string) { + generateBootstrap(conf) + }, + } +} + +type bootstrapData struct { + Package string + FormattedName string + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string +} + +func generateBootstrap(conf GeneratorsConfig) { + packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName() + + data := bootstrapData{ + Package: determinePackageName(packageName, conf.Internal), + FormattedName: formattedName, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom bootstrap file:", err) + templateText = string(tpl) + } else if conf.Agouti { + templateText = agoutiBootstrapText + } else { + templateText = bootstrapText + } + + bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Parse(templateText) + command.AbortIfError("Failed to parse bootstrap template:", err) + + buf := &bytes.Buffer{} + bootstrapTemplate.Execute(buf, data) + + buf.WriteTo(f) + + internal.GoFmt(targetFile) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go new file mode 100644 index 000000000..93b0b4b25 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -0,0 +1,239 @@ +package generators + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildGenerateCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, generate will create a test file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, generate will create a test file that uses the regular package name (i.e. `package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the test file template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "generate", + Usage: "ginkgo generate ", + ShortDoc: "Generate a test file named _test.go", + Documentation: `If the optional argument is omitted, a file named after the package in the current directory will be created. + +You can pass multiple to generate multiple files simultaneously. The resulting files are named _test.go. 
+ +You can also pass a of the form "file.go" and generate will emit "file_test.go".`, + DocLink: "generators", + Flags: flags, + Command: func(args []string, _ []string) { + generateTestFiles(conf, args) + }, + } +} + +type specData struct { + Package string + Subject string + PackageImportPath string + ImportPackage bool + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string +} + +func generateTestFiles(conf GeneratorsConfig, args []string) { + subjects := args + if len(subjects) == 0 { + subjects = []string{""} + } + for _, subject := range subjects { + generateTestFileForSubject(subject, conf) + } +} + +func generateTestFileForSubject(subject string, conf GeneratorsConfig) { + packageName, specFilePrefix, formattedName := getPackageAndFormattedName() + if subject != "" { + specFilePrefix = formatSubject(subject) + formattedName = prettifyName(specFilePrefix) + } + + if conf.Internal { + specFilePrefix = specFilePrefix + "_internal" + } + + data := specData{ + Package: determinePackageName(packageName, conf.Internal), + Subject: formattedName, + PackageImportPath: getPackageImportPath(), + ImportPackage: !conf.Internal, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. "github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_test.go", specFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create test file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom template file:", err) + templateText = string(tpl) + } else if conf.Agouti { + templateText = agoutiSpecText + } else { + templateText = specText + } + + specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Parse(templateText) + command.AbortIfError("Failed to read parse test template:", err) + + specTemplate.Execute(f, data) + internal.GoFmt(targetFile) +} + +func formatSubject(name string) string { + name = strings.ReplaceAll(name, "-", "_") + name = strings.ReplaceAll(name, " ", "_") + name = strings.Split(name, ".go")[0] + name = strings.Split(name, "_test")[0] + return name +} + +// moduleName returns module name from go.mod from given module root directory +func moduleName(modRoot string) string { + modFile, err := os.Open(filepath.Join(modRoot, "go.mod")) + if err != nil { + return "" + } + + mod := make([]byte, 128) + _, err = modFile.Read(mod) + if err != nil { + return "" + } + + slashSlash := []byte("//") + moduleStr := []byte("module") + + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + 
return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + + return "" // missing module path +} + +func findModuleRoot(dir string) (root string) { + dir = filepath.Clean(dir) + + // Look for enclosing go.mod. + for { + if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { + return dir + } + d := filepath.Dir(dir) + if d == dir { + break + } + dir = d + } + return "" +} + +func getPackageImportPath() string { + workingDir, err := os.Getwd() + if err != nil { + panic(err.Error()) + } + + sep := string(filepath.Separator) + + // Try go.mod file first + modRoot := findModuleRoot(workingDir) + if modRoot != "" { + modName := moduleName(modRoot) + if modName != "" { + cd := strings.ReplaceAll(workingDir, modRoot, "") + cd = strings.ReplaceAll(cd, sep, "/") + return modName + cd + } + } + + // Fallback to GOPATH structure + paths := strings.Split(workingDir, sep+"src"+sep) + if len(paths) == 1 { + fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n") + return "UNKNOWN_PACKAGE_PATH" + } + return filepath.ToSlash(paths[len(paths)-1]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go new file mode 100644 index 000000000..c3470adbf --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -0,0 +1,41 @@ +package generators + +var specText = `package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + +}) +` + +var agoutiSpecText = `package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" + . 
"github.com/sclevine/agouti/matchers" + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + var page *agouti.Page + + {{.GinkgoPackage}}BeforeEach(func() { + var err error + page, err = agoutiDriver.NewPage() + {{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred()) + }) + + {{.GinkgoPackage}}AfterEach(func() { + {{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed()) + }) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go new file mode 100644 index 000000000..3086e6056 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -0,0 +1,63 @@ +package generators + +import ( + "go/build" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +type GeneratorsConfig struct { + Agouti, NoDot, Internal bool + CustomTemplate string +} + +func getPackageAndFormattedName() (string, string, string) { + path, err := os.Getwd() + command.AbortIfError("Could not get current working directory:", err) + + dirName := strings.ReplaceAll(filepath.Base(path), "-", "_") + dirName = strings.ReplaceAll(dirName, " ", "_") + + pkg, err := build.ImportDir(path, 0) + packageName := pkg.Name + if err != nil { + packageName = ensureLegalPackageName(dirName) + } + + formattedName := prettifyName(filepath.Base(path)) + return packageName, dirName, formattedName +} + +func ensureLegalPackageName(name string) string { + if name == "_" { + return "underscore" + } + if len(name) == 0 { + return "empty" + } + n, isDigitErr := strconv.Atoi(string(name[0])) + if isDigitErr == nil { + return []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:] + } + return name +} + +func prettifyName(name string) string { + name = strings.ReplaceAll(name, "-", " ") + name = strings.ReplaceAll(name, "_", " ") + name = strings.Title(name) + name = strings.ReplaceAll(name, " ", "") + return name +} + +func determinePackageName(name string, internal bool) string { + if internal { + return name + } + + return name + "_test" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go new file mode 100644 index 000000000..496ec4a28 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -0,0 +1,152 @@ +package internal + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite { + if suite.PathToCompiledTest != "" { + return suite + } + + suite.CompilationError = nil + + path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test")) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error()) + return suite + } + + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./") + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) + return suite + } + + cmd := exec.Command("go", args...) 
+ cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output) + } else { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error()) + } + return suite + } + + if strings.Contains(string(output), "[no test files]") { + suite.State = TestSuiteStateSkippedDueToEmptyCompilation + return suite + } + + if len(output) > 0 { + fmt.Println(string(output)) + } + + if !FileExists(path) { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path) + return suite + } + + suite.State = TestSuiteStateCompiled + suite.PathToCompiledTest = path + return suite +} + +func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) { + if goFlagsConfig.BinaryMustBePreserved() { + return + } + for _, suite := range suites { + if !suite.Precompiled { + os.Remove(suite.PathToCompiledTest) + } + } +} + +type parallelSuiteBundle struct { + suite TestSuite + compiled chan TestSuite +} + +type OrderedParallelCompiler struct { + mutex *sync.Mutex + stopped bool + numCompilers int + + idx int + numSuites int + completionChannels []chan TestSuite +} + +func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler { + return &OrderedParallelCompiler{ + mutex: &sync.Mutex{}, + numCompilers: numCompilers, + } +} + +func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) { + opc.stopped = false + opc.idx = 0 + opc.numSuites = len(suites) + opc.completionChannels = make([]chan TestSuite, opc.numSuites) + + toCompile := make(chan parallelSuiteBundle, opc.numCompilers) + for compiler := 0; compiler < opc.numCompilers; compiler++ { + go func() { + for bundle := range toCompile { + c, suite := bundle.compiled, bundle.suite + opc.mutex.Lock() + stopped := opc.stopped + opc.mutex.Unlock() + if !stopped { + suite = CompileSuite(suite, goFlagsConfig) + } + c <- suite + } + }() + } + + for idx, suite := range suites { + opc.completionChannels[idx] = make(chan TestSuite, 1) + toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]} + if idx == 0 { //compile first suite serially + suite = <-opc.completionChannels[0] + opc.completionChannels[0] <- suite + } + } + + close(toCompile) +} + +func (opc *OrderedParallelCompiler) Next() (int, TestSuite) { + if opc.idx >= opc.numSuites { + return opc.numSuites, TestSuite{} + } + + idx := opc.idx + suite := <-opc.completionChannels[idx] + opc.idx = opc.idx + 1 + + return idx, suite +} + +func (opc *OrderedParallelCompiler) StopAndDrain() { + opc.mutex.Lock() + opc.stopped = true + opc.mutex.Unlock() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go new file mode 100644 index 000000000..bd3c6d028 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -0,0 +1,237 @@ +package internal + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + + "github.com/google/pprof/profile" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string 
{ + suffix := "" + if process != 0 { + suffix = fmt.Sprintf(".%d", process) + } + if cliConfig.OutputDir == "" { + return filepath.Join(suite.AbsPath(), assetName+suffix) + } + outputDir, _ := filepath.Abs(cliConfig.OutputDir) + return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix) +} + +func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) { + messages := []string{} + suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile + + // merge cover profiles if need be + if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles { + coverProfiles := []string{} + for _, suite := range suitesWithProfiles { + if !suite.HasProgrammaticFocus { + coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)) + } + } + + if len(coverProfiles) > 0 { + dst := goFlagsConfig.CoverProfile + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile) + } + err := MergeAndCleanupCoverProfiles(coverProfiles, dst) + if err != nil { + return messages, err + } + coverage, err := GetCoverageFromCoverProfile(dst) + if err != nil { + return messages, err + } + if coverage == 0 { + messages = append(messages, "composite coverage: [no statements]") + } else if suitesWithProfiles.AnyHaveProgrammaticFocus() { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programatically focused specs", coverage)) + } else { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage)) + } + } else { + messages = append(messages, "no composite coverage computed: all suites included programatically focused specs") + } + } + + // copy binaries if need be + for _, suite := range suitesWithProfiles { + if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" { + src := suite.PathToCompiledTest + dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test") + if suite.Precompiled { + if err := CopyFile(src, dst); err != nil { + return messages, err + } + } else { + if err := os.Rename(src, dst); err != nil { + return messages, err + } + } + } + } + + type reportFormat struct { + ReportName string + GenerateFunc func(types.Report, string) error + MergeFunc func([]string, string) ([]string, error) + } + reportFormats := []reportFormat{} + if reporterConfig.JSONReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports}) + } + if reporterConfig.JUnitReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports}) + } + if reporterConfig.TeamcityReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports}) + } + + // Generate reports for suites that failed to run + reportableSuites := suites.ThatAreGinkgoSuites() + for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, 
TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) { + report := types.Report{ + SuitePath: suite.AbsPath(), + SuiteConfig: suiteConfig, + SuiteSucceeded: false, + } + switch suite.State { + case TestSuiteStateFailedToCompile: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error()) + case TestSuiteStateFailedDueToTimeout: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON) + case TestSuiteStateSkippedDueToPriorFailures: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON) + case TestSuiteStateSkippedDueToEmptyCompilation: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON) + report.SuiteSucceeded = true + } + + for _, format := range reportFormats { + format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + } + + // Merge reports unless we've been asked to keep them separate + if !cliConfig.KeepSeparateReports { + for _, format := range reportFormats { + reports := []string{} + for _, suite := range reportableSuites { + reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + dst := format.ReportName + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, format.ReportName) + } + mergeMessages, err := format.MergeFunc(reports, dst) + messages = append(messages, mergeMessages...) + if err != nil { + return messages, err + } + } + } + + return messages, nil +} + +//loads each profile, combines them, deletes them, stores them in destination +func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { + combined := &bytes.Buffer{} + modeRegex := regexp.MustCompile(`^mode: .*\n`) + for i, profile := range profiles { + contents, err := os.ReadFile(profile) + if err != nil { + return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + } + os.Remove(profile) + + // remove the cover mode line from every file + // except the first one + if i > 0 { + contents = modeRegex.ReplaceAll(contents, []byte{}) + } + + _, err = combined.Write(contents) + + // Add a newline to the end of every file if missing. 
+ if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { + _, err = combined.Write([]byte("\n")) + } + + if err != nil { + return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + } + } + + err := os.WriteFile(destination, combined.Bytes(), 0666) + if err != nil { + return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + } + return nil +} + +func GetCoverageFromCoverProfile(profile string) (float64, error) { + cmd := exec.Command("go", "tool", "cover", "-func", profile) + output, err := cmd.CombinedOutput() + if err != nil { + return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + } + re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) + matches := re.FindStringSubmatch(string(output)) + if matches == nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage") + } + coverageString := matches[1] + coverage, err := strconv.ParseFloat(coverageString, 64) + if err != nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error()) + } + + return coverage, nil +} + +func MergeProfiles(profilePaths []string, destination string) error { + profiles := []*profile.Profile{} + for _, profilePath := range profilePaths { + proFile, err := os.Open(profilePath) + if err != nil { + return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) + } + prof, err := profile.Parse(proFile) + if err != nil { + return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) + } + profiles = append(profiles, prof) + os.Remove(profilePath) + } + + mergedProfile, err := profile.Merge(profiles) + if err != nil { + return fmt.Errorf("Could not merge profiles:\n%s", err.Error()) + } + + outFile, err := os.Create(destination) + if err != nil { + return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error()) + } + err = mergedProfile.Write(outFile) + if err != nil { + return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error()) + } + err = outFile.Close() + if err != nil { + return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error()) + } + + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go new file mode 100644 index 000000000..cad238671 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -0,0 +1,348 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "strings" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + suite.State = TestSuiteStateFailed + suite.HasProgrammaticFocus = false + + if suite.PathToCompiledTest == "" { + return suite + } + + if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 { + suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else if suite.IsGinkgo { + suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else { + suite = runGoTest(suite, cliConfig, goFlagsConfig) + } + 
runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite) + return suite +} + +func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) { + buf := &bytes.Buffer{} + cmd := exec.Command(suite.PathToCompiledTest, args...) + cmd.Dir = suite.Path + if pipeToStdout { + cmd.Stderr = io.MultiWriter(os.Stdout, buf) + cmd.Stdout = os.Stdout + } else { + cmd.Stderr = buf + cmd.Stdout = buf + } + err := cmd.Start() + command.AbortIfError("Failed to start test suite", err) + + return cmd, buf +} + +func checkForNoTestsWarning(buf *bytes.Buffer) bool { + if strings.Contains(buf.String(), "warning: no tests to run") { + fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`) + return true + } + return false +} + +func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite { + args, err := types.GenerateGoTestRunArgs(goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + return suite +} + +func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + if goFlagsConfig.Cover { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + if goFlagsConfig.BlockProfile != "" { + goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + } + if goFlagsConfig.CPUProfile != "" { + goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MemProfile != "" { + goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MutexProfile != "" { + goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + } + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) 
+ + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + if suite.HasProgrammaticFocus { + if goFlagsConfig.Cover { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } + if goFlagsConfig.BlockProfile != "" { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } + if goFlagsConfig.CPUProfile != "" { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MemProfile != "" { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MutexProfile != "" { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } + } + + return suite +} + +func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + type procResult struct { + passed bool + hasProgrammaticFocus bool + } + + numProcs := cliConfig.ComputedProcs() + procOutput := make([]*bytes.Buffer, numProcs) + coverProfiles := []string{} + + blockProfiles := []string{} + cpuProfiles := []string{} + memProfiles := []string{} + mutexProfiles := []string{} + + procResults := make(chan procResult) + + server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)) + command.AbortIfError("Failed to start parallel spec server", err) + server.Start() + defer server.Close() + + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + for proc := 1; proc <= numProcs; proc++ { + procGinkgoConfig := ginkgoConfig + procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address() + + procGoFlagsConfig := goFlagsConfig + if goFlagsConfig.Cover { + procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc) + coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile) + } + if goFlagsConfig.BlockProfile != "" { + procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc) + blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile) + } + if goFlagsConfig.CPUProfile != "" { + procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc) + cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile) + } + if goFlagsConfig.MemProfile != "" { + procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, 
cliConfig, proc) + memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile) + } + if goFlagsConfig.MutexProfile != "" { + procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc) + mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile) + } + + args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) + + cmd, buf := buildAndStartCommand(suite, args, false) + procOutput[proc-1] = buf + server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() }) + + go func() { + cmd.Wait() + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + procResults <- procResult{ + passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE), + hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE, + } + }() + } + + passed := true + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + result := <-procResults + passed = passed && result.passed + suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus + } + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + select { + case <-server.GetSuiteDone(): + fmt.Println("") + case <-time.After(time.Second): + //one of the nodes never finished reporting to the server. Something must have gone wrong. + fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n")) + fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path)) + fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. 
However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n")) + fmt.Fprintln(formatter.ColorableStdErr, " ") + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc)) + fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String())) + } + fmt.Fprintf(os.Stderr, "** End **") + } + + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + output := procOutput[proc-1].String() + if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite { + suite.State = TestSuiteStateFailed + } + if strings.Contains(output, "deprecated Ginkgo functionality") { + fmt.Fprintln(os.Stderr, output) + } + } + + if len(coverProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } else { + coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile) + command.AbortIfError("Failed to combine cover profiles", err) + + coverage, err := GetCoverageFromCoverProfile(coverProfile) + command.AbortIfError("Failed to compute coverage", err) + if coverage == 0 { + fmt.Fprintln(os.Stdout, "coverage: [no statements]") + } else { + fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage) + } + } + } + if len(blockProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } else { + blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + err := MergeProfiles(blockProfiles, blockProfile) + command.AbortIfError("Failed to combine blockprofiles", err) + } + } + if len(cpuProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } else { + cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + err := MergeProfiles(cpuProfiles, cpuProfile) + command.AbortIfError("Failed to combine cpuprofiles", err) + } + } + if len(memProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } else { + memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + err := MergeProfiles(memProfiles, memProfile) + command.AbortIfError("Failed to combine memprofiles", err) + } + } + if len(mutexProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } else { + mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + err := MergeProfiles(mutexProfiles, mutexProfile) + command.AbortIfError("Failed to combine mutexprofiles", err) + } + } + + return suite +} + +func runAfterRunHook(command string, noColor bool, suite TestSuite) { + if command == "" { + return + } + f := formatter.NewWithNoColorBool(noColor) + + // Allow for string replacement to pass input to the command + passed := "[FAIL]" + if suite.State.Is(TestSuiteStatePassed) { + 
passed = "[PASS]" + } + command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed) + command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName) + + // Must break command into parts + splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`) + parts := splitArgs.FindAllString(command, -1) + + output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput() + if err != nil { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output)) + } else { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go new file mode 100644 index 000000000..64dcb1b78 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go @@ -0,0 +1,283 @@ +package internal + +import ( + "errors" + "math/rand" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed" +const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set" +const EMPTY_SKIP_FAILURE_REASON = "Suite did not run go test reported that no test files were found" + +type TestSuiteState uint + +const ( + TestSuiteStateInvalid TestSuiteState = iota + + TestSuiteStateUncompiled + TestSuiteStateCompiled + + TestSuiteStatePassed + + TestSuiteStateSkippedDueToEmptyCompilation + TestSuiteStateSkippedByFilter + TestSuiteStateSkippedDueToPriorFailures + + TestSuiteStateFailed + TestSuiteStateFailedDueToTimeout + TestSuiteStateFailedToCompile +) + +var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile} + +func (state TestSuiteState) Is(states ...TestSuiteState) bool { + for _, suiteState := range states { + if suiteState == state { + return true + } + } + + return false +} + +type TestSuite struct { + Path string + PackageName string + IsGinkgo bool + + Precompiled bool + PathToCompiledTest string + CompilationError error + + HasProgrammaticFocus bool + State TestSuiteState +} + +func (ts TestSuite) AbsPath() string { + path, _ := filepath.Abs(ts.Path) + return path +} + +func (ts TestSuite) NamespacedName() string { + name := relPath(ts.Path) + name = strings.TrimLeft(name, "."+string(filepath.Separator)) + name = strings.ReplaceAll(name, string(filepath.Separator), "_") + name = strings.ReplaceAll(name, " ", "_") + if name == "" { + return ts.PackageName + } + return name +} + +type TestSuites []TestSuite + +func (ts TestSuites) AnyHaveProgrammaticFocus() bool { + for _, suite := range ts { + if suite.HasProgrammaticFocus { + return true + } + } + + return false +} + +func (ts TestSuites) ThatAreGinkgoSuites() TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.IsGinkgo { + out = append(out, suite) + } + } + return out +} + +func (ts TestSuites) CountWithState(states ...TestSuiteState) int { + n := 0 + for _, suite := range ts { + if suite.State.Is(states...) { + n += 1 + } + } + + return n +} + +func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.State.Is(states...) 
{ + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if !suite.State.Is(states...) { + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) ShuffledCopy(seed int64) TestSuites { + out := make(TestSuites, len(ts)) + permutation := rand.New(rand.NewSource(seed)).Perm(len(ts)) + for i, j := range permutation { + out[i] = ts[j] + } + return out +} + +func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites { + suites := TestSuites{} + + if len(args) > 0 { + for _, arg := range args { + if allowPrecompiled { + suite, err := precompiledTestSuite(arg) + if err == nil { + suites = append(suites, suite) + continue + } + } + recurseForSuite := cliConfig.Recurse + if strings.HasSuffix(arg, "/...") && arg != "/..." { + arg = arg[:len(arg)-4] + recurseForSuite = true + } + suites = append(suites, suitesInDir(arg, recurseForSuite)...) + } + } else { + suites = suitesInDir(".", cliConfig.Recurse) + } + + if cliConfig.SkipPackage != "" { + skipFilters := strings.Split(cliConfig.SkipPackage, ",") + for idx := range suites { + for _, skipFilter := range skipFilters { + if strings.Contains(suites[idx].Path, skipFilter) { + suites[idx].State = TestSuiteStateSkippedByFilter + break + } + } + } + } + + return suites +} + +func precompiledTestSuite(path string) (TestSuite, error) { + info, err := os.Stat(path) + if err != nil { + return TestSuite{}, err + } + + if info.IsDir() { + return TestSuite{}, errors.New("this is a directory, not a file") + } + + if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" { + return TestSuite{}, errors.New("this is not a .test binary") + } + + if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 { + return TestSuite{}, errors.New("this is not executable") + } + + dir := relPath(filepath.Dir(path)) + packageName := strings.TrimSuffix(filepath.Base(path), ".exe") + packageName = strings.TrimSuffix(packageName, ".test") + + path, err = filepath.Abs(path) + if err != nil { + return TestSuite{}, err + } + + return TestSuite{ + Path: dir, + PackageName: packageName, + IsGinkgo: true, + Precompiled: true, + PathToCompiledTest: path, + State: TestSuiteStateCompiled, + }, nil +} + +func suitesInDir(dir string, recurse bool) TestSuites { + suites := TestSuites{} + + if path.Base(dir) == "vendor" { + return suites + } + + files, _ := os.ReadDir(dir) + re := regexp.MustCompile(`^[^._].*_test\.go$`) + for _, file := range files { + if !file.IsDir() && re.Match([]byte(file.Name())) { + suite := TestSuite{ + Path: relPath(dir), + PackageName: packageNameForSuite(dir), + IsGinkgo: filesHaveGinkgoSuite(dir, files), + State: TestSuiteStateUncompiled, + } + suites = append(suites, suite) + break + } + } + + if recurse { + re = regexp.MustCompile(`^[._]`) + for _, file := range files { + if file.IsDir() && !re.Match([]byte(file.Name())) { + suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...) + } + } + } + + return suites +} + +func relPath(dir string) string { + dir, _ = filepath.Abs(dir) + cwd, _ := os.Getwd() + dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) + + if string(dir[0]) != "." { + dir = "." 
+ string(filepath.Separator) + dir + } + + return dir +} + +func packageNameForSuite(dir string) string { + path, _ := filepath.Abs(dir) + return filepath.Base(path) +} + +func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool { + reTestFile := regexp.MustCompile(`_test\.go$`) + reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`) + + for _, file := range files { + if !file.IsDir() && reTestFile.Match([]byte(file.Name())) { + contents, _ := os.ReadFile(dir + "/" + file.Name()) + if reGinkgo.Match(contents) { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go new file mode 100644 index 000000000..bd9ca7d51 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go @@ -0,0 +1,86 @@ +package internal + +import ( + "fmt" + "io" + "os" + "os/exec" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func FileExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func CopyFile(src string, dest string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + + srcStat, err := srcFile.Stat() + if err != nil { + return err + } + + if _, err := os.Stat(dest); err == nil { + os.Remove(dest) + } + + destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode()) + if err != nil { + return err + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + return err + } + + if err := srcFile.Close(); err != nil { + return err + } + return destFile.Close() +} + +func GoFmt(path string) { + out, err := exec.Command("go", "fmt", path).CombinedOutput() + if err != nil { + command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err) + } +} + +func PluralizedWord(singular, plural string, count int) string { + if count == 1 { + return singular + } + return plural +} + +func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string { + out := "" + out += "There were failures detected in the following suites:\n" + + maxPackageNameLength := 0 + for _, suite := range suites.WithState(TestSuiteStateFailureStates...) 
{ + if len(suite.PackageName) > maxPackageNameLength { + maxPackageNameLength = len(suite.PackageName) + } + } + + packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength) + for _, suite := range suites { + switch suite.State { + case TestSuiteStateFailed: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedToCompile: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedDueToTimeout: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON) + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go new file mode 100644 index 000000000..6c61f09d1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go @@ -0,0 +1,123 @@ +package labels + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "sort" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/go/ast/inspector" +) + +func BuildLabelsCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + + flags, err := types.BuildLabelsCommandFlagSet(&cliConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "labels", + Usage: "ginkgo labels ", + Flags: flags, + ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).", + DocLink: "spec-labels", + Command: func(args []string, _ []string) { + ListLabels(args, cliConfig) + }, + } +} + +func ListLabels(args []string, cliConfig types.CLIConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + for _, suite := range suites { + labels := fetchLabelsFromPackage(suite.Path) + if len(labels) == 0 { + fmt.Printf("%s: No labels found\n", suite.PackageName) + } else { + fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", ")) + } + } +} + +func fetchLabelsFromPackage(packagePath string) []string { + fset := token.NewFileSet() + parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0) + command.AbortIfError("Failed to parse package source:", err) + + files := []*ast.File{} + hasTestPackage := false + for key, pkg := range parsedPackages { + if strings.HasSuffix(key, "_test") { + hasTestPackage = true + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + if !hasTestPackage { + for _, pkg := range parsedPackages { + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + + seen := map[string]bool{} + labels := []string{} + ispr := inspector.New(files) + ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) { + potentialLabels := fetchLabels(n.(*ast.CallExpr)) + for _, label := range potentialLabels { + if !seen[label] { + seen[label] = true + labels = append(labels, strconv.Quote(label)) + } + } + }) + + sort.Strings(labels) + return labels +} + +func fetchLabels(callExpr *ast.CallExpr) []string { + out := []string{} + switch expr := callExpr.Fun.(type) { + case *ast.Ident: + if expr.Name != "Label" { + return out + } + case *ast.SelectorExpr: + if expr.Sel.Name != 
"Label" { + return out + } + default: + return out + } + for _, arg := range callExpr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) + } + } + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go new file mode 100644 index 000000000..e9abb27d8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/build" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/generators" + "github.com/onsi/ginkgo/v2/ginkgo/labels" + "github.com/onsi/ginkgo/v2/ginkgo/outline" + "github.com/onsi/ginkgo/v2/ginkgo/run" + "github.com/onsi/ginkgo/v2/ginkgo/unfocus" + "github.com/onsi/ginkgo/v2/ginkgo/watch" + "github.com/onsi/ginkgo/v2/types" +) + +var program command.Program + +func GenerateCommands() []command.Command { + return []command.Command{ + watch.BuildWatchCommand(), + build.BuildBuildCommand(), + generators.BuildBootstrapCommand(), + generators.BuildGenerateCommand(), + labels.BuildLabelsCommand(), + outline.BuildOutlineCommand(), + unfocus.BuildUnfocusCommand(), + BuildVersionCommand(), + } +} + +func main() { + program = command.Program{ + Name: "ginkgo", + Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION), + Commands: GenerateCommands(), + DefaultCommand: run.BuildRunCommand(), + DeprecatedCommands: []command.DeprecatedCommand{ + {Name: "convert", Deprecation: types.Deprecations.Convert()}, + {Name: "blur", Deprecation: types.Deprecations.Blur()}, + {Name: "nodot", Deprecation: types.Deprecations.Nodot()}, + }, + } + + program.RunAndExit(os.Args) +} + +func BuildVersionCommand() command.Command { + return command.Command{ + Name: "version", + Usage: "ginkgo version", + ShortDoc: "Print Ginkgo's version", + Command: func(_ []string, _ []string) { + fmt.Printf("Ginkgo Version %s\n", types.VERSION) + }, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go new file mode 100644 index 000000000..c197bb686 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -0,0 +1,218 @@ +package outline + +import ( + "go/ast" + "go/token" + "strconv" +) + +const ( + // undefinedTextAlt is used if the spec/container text cannot be derived + undefinedTextAlt = "undefined" +) + +// ginkgoMetadata holds useful bits of information for every entry in the outline +type ginkgoMetadata struct { + // Name is the spec or container function name, e.g. 
`Describe` or `It` + Name string `json:"name"` + + // Text is the `text` argument passed to specs, and some containers + Text string `json:"text"` + + // Start is the position of first character of the spec or container block + Start int `json:"start"` + + // End is the position of first character immediately after the spec or container block + End int `json:"end"` + + Spec bool `json:"spec"` + Focused bool `json:"focused"` + Pending bool `json:"pending"` +} + +// ginkgoNode is used to construct the outline as a tree +type ginkgoNode struct { + ginkgoMetadata + Nodes []*ginkgoNode `json:"nodes"` +} + +type walkFunc func(n *ginkgoNode) + +func (n *ginkgoNode) PreOrder(f walkFunc) { + f(n) + for _, m := range n.Nodes { + m.PreOrder(f) + } +} + +func (n *ginkgoNode) PostOrder(f walkFunc) { + for _, m := range n.Nodes { + m.PostOrder(f) + } + f(n) +} + +func (n *ginkgoNode) Walk(pre, post walkFunc) { + pre(n) + for _, m := range n.Nodes { + m.Walk(pre, post) + } + post(n) +} + +// PropagateInheritedProperties propagates the Pending and Focused properties +// through the subtree rooted at n. +func (n *ginkgoNode) PropagateInheritedProperties() { + n.PreOrder(func(thisNode *ginkgoNode) { + for _, descendantNode := range thisNode.Nodes { + if thisNode.Pending { + descendantNode.Pending = true + descendantNode.Focused = false + } + if thisNode.Focused && !descendantNode.Pending { + descendantNode.Focused = true + } + } + }) +} + +// BackpropagateUnfocus propagates the Focused property through the subtree +// rooted at n. It applies the rule described in the Ginkgo docs: +// > Nested programmatically focused specs follow a simple rule: if a +// > leaf-node is marked focused, any of its ancestor nodes that are marked +// > focus will be unfocused. +func (n *ginkgoNode) BackpropagateUnfocus() { + focusedSpecInSubtreeStack := []bool{} + n.PostOrder(func(thisNode *ginkgoNode) { + if thisNode.Spec { + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused) + return + } + focusedSpecInSubtree := false + for range thisNode.Nodes { + focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1] + focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1] + } + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree) + if focusedSpecInSubtree { + thisNode.Focused = false + } + }) + +} + +func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) { + switch ex := ce.Fun.(type) { + case *ast.Ident: + return "", ex.Name, true + case *ast.SelectorExpr: + pkgID, ok := ex.X.(*ast.Ident) + if !ok { + return "", "", false + } + // A package identifier is top-level, so Obj must be nil + if pkgID.Obj != nil { + return "", "", false + } + if ex.Sel == nil { + return "", "", false + } + return pkgID.Name, ex.Sel.Name, true + default: + return "", "", false + } +} + +// absoluteOffsetsForNode derives the absolute character offsets of the node start and +// end positions. +func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) { + return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset +} + +// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree +// corresponding to a Ginkgo container or spec. 
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) { + packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce) + if !ok { + return nil, false + } + + n := ginkgoNode{} + n.Name = identName + n.Start, n.End = absoluteOffsetsForNode(fset, ce) + n.Nodes = make([]*ginkgoNode, 0) + switch identName { + case "It", "Specify", "Entry": + n.Spec = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FIt", "FSpecify", "FEntry": + n.Spec = true + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry": + n.Spec = true + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "Context", "Describe", "When", "DescribeTable": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FContext", "FDescribe", "FWhen", "FDescribeTable": + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable": + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "By": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterEach", "BeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "JustAfterEach", "JustBeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterSuite", "BeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "SynchronizedAfterSuite", "SynchronizedBeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + default: + return nil, false + } +} + +// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or +// container. If it cannot derive it, it returns the alt text. +func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string { + text, defined := textFromCallExpr(ce) + if !defined { + return alt + } + return text +} + +// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If +// it cannot derive it, it returns false. +func textFromCallExpr(ce *ast.CallExpr) (string, bool) { + if len(ce.Args) < 1 { + return "", false + } + text, ok := ce.Args[0].(*ast.BasicLit) + if !ok { + return "", false + } + switch text.Kind { + case token.CHAR, token.STRING: + // For token.CHAR and token.STRING, Value is quoted + unquoted, err := strconv.Unquote(text.Value) + if err != nil { + // If unquoting fails, just use the raw Value + return text.Value, true + } + return unquoted, true + default: + return text.Value, true + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go new file mode 100644 index 000000000..4328ab391 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -0,0 +1,65 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Most of the required functions were available in the +// "golang.org/x/tools/go/ast/astutil" package, but not exported. +// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go + +package outline + +import ( + "go/ast" + "strconv" + "strings" +) + +// packageNameForImport returns the package name for the package. If the package +// is not imported, it returns nil. "Package name" refers to `pkgname` in the +// call expression `pkgname.ExportedIdentifier`. Examples: +// (import path not found) -> nil +// "import example.com/pkg/foo" -> "foo" +// "import fooalias example.com/pkg/foo" -> "fooalias" +// "import . example.com/pkg/foo" -> "" +func packageNameForImport(f *ast.File, path string) *string { + spec := importSpec(f, path) + if spec == nil { + return nil + } + name := spec.Name.String() + if name == "" { + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + } + if name == "." { + name = "" + } + return &name +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go new file mode 100644 index 000000000..4b45e7627 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go @@ -0,0 +1,103 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/ast/inspector" +) + +const ( + // ginkgoImportPath is the well-known ginkgo import path + ginkgoImportPath = "github.com/onsi/ginkgo/v2" +) + +// FromASTFile returns an outline for a Ginkgo test source file +func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) { + ginkgoPackageName := packageNameForImport(src, ginkgoImportPath) + if ginkgoPackageName == nil { + return nil, fmt.Errorf("file does not import %q", ginkgoImportPath) + } + + root := ginkgoNode{} + stack := []*ginkgoNode{&root} + ispr := inspector.New([]*ast.File{src}) + ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool { + if push { + // Pre-order traversal + ce, ok := node.(*ast.CallExpr) + if !ok { + // Because `Nodes` calls this function only when the node is an + // ast.CallExpr, this should never happen + panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End())) + } + gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName) + if !ok { + // Node is not a Ginkgo spec or container, continue + return true + } + parent := stack[len(stack)-1] + parent.Nodes = append(parent.Nodes, gn) + stack = append(stack, gn) + return true + } + // Post-order traversal + start, end := absoluteOffsetsForNode(fset, node) + lastVisitedGinkgoNode := stack[len(stack)-1] + if start != lastVisitedGinkgoNode.Start || 
end != lastVisitedGinkgoNode.End { + // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue + return true + } + stack = stack[0 : len(stack)-1] + return true + }) + if len(root.Nodes) == 0 { + return &outline{[]*ginkgoNode{}}, nil + } + + // Derive the final focused property for all nodes. This must be done + // _before_ propagating the inherited focused property. + root.BackpropagateUnfocus() + // Now, propagate inherited properties, including focused and pending. + root.PropagateInheritedProperties() + + return &outline{root.Nodes}, nil +} + +type outline struct { + Nodes []*ginkgoNode `json:"nodes"` +} + +func (o *outline) MarshalJSON() ([]byte, error) { + return json.Marshal(o.Nodes) +} + +// String returns a CSV-formatted outline. Spec or container are output in +// depth-first order. +func (o *outline) String() string { + return o.StringIndent(0) +} + +// StringIndent returns a CSV-formated outline, but every line is indented by +// one 'width' of spaces for every level of nesting. +func (o *outline) StringIndent(width int) string { + var b strings.Builder + b.WriteString("Name,Text,Start,End,Spec,Focused,Pending\n") + + currentIndent := 0 + pre := func(n *ginkgoNode) { + b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) + b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending)) + currentIndent += width + } + post := func(n *ginkgoNode) { + currentIndent -= width + } + for _, n := range o.Nodes { + n.Walk(pre, post) + } + return b.String() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go new file mode 100644 index 000000000..36698d46a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go @@ -0,0 +1,98 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/types" +) + +const ( + // indentWidth is the width used by the 'indent' output + indentWidth = 4 + // stdinAlias is a portable alias for stdin. This convention is used in + // other CLIs, e.g., kubectl. 
+ stdinAlias = "-" + usageCommand = "ginkgo outline " +) + +type outlineConfig struct { + Format string +} + +func BuildOutlineCommand() command.Command { + conf := outlineConfig{ + Format: "csv", + } + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "format", KeyPath: "Format", + Usage: "Format of outline", + UsageArgument: "one of 'csv', 'indent', or 'json'", + UsageDefaultValue: conf.Format, + }, + }, + &conf, + types.GinkgoFlagSections{}, + ) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "outline", + Usage: "ginkgo outline ", + ShortDoc: "Create an outline of Ginkgo symbols for a file", + Documentation: "To read from stdin, use: `ginkgo outline -`", + DocLink: "creating-an-outline-of-specs", + Flags: flags, + Command: func(args []string, _ []string) { + outlineFile(args, conf.Format) + }, + } +} + +func outlineFile(args []string, format string) { + if len(args) != 1 { + command.AbortWithUsage("outline expects exactly one argument") + } + + filename := args[0] + var src *os.File + if filename == stdinAlias { + src = os.Stdin + } else { + var err error + src, err = os.Open(filename) + command.AbortIfError("Failed to open file:", err) + } + + fset := token.NewFileSet() + + parsedSrc, err := parser.ParseFile(fset, filename, src, 0) + command.AbortIfError("Failed to parse source:", err) + + o, err := FromASTFile(fset, parsedSrc) + command.AbortIfError("Failed to create outline:", err) + + var oerr error + switch format { + case "csv": + _, oerr = fmt.Print(o) + case "indent": + _, oerr = fmt.Print(o.StringIndent(indentWidth)) + case "json": + b, err := json.Marshal(o) + if err != nil { + println(fmt.Sprintf("error marshalling to json: %s", err)) + } + _, oerr = fmt.Println(string(b)) + default: + command.AbortWith("Format %s not accepted", format) + } + command.AbortIfError("Failed to write outline:", oerr) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go new file mode 100644 index 000000000..8ee0acc81 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go @@ -0,0 +1,230 @@ +package run + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildRunCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + interruptHandler := interrupt_handler.NewInterruptHandler(0, nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "run", + Flags: flags, + Usage: "ginkgo run -- ", + ShortDoc: "Run the tests in the passed in (or the package in the current directory if left blank)", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "running-tests", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + runner := &SpecRunner{ + cliConfig: cliConfig, + 
goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + runner.RunSpecs(args, additionalArgs) + }, + } +} + +type SpecRunner struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, r.cliConfig, true) + skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter) + suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter) + + if len(skippedSuites) > 0 { + fmt.Println("Will skip:") + for _, skippedSuite := range skippedSuites { + fmt.Println(" " + skippedSuite.Path) + } + } + + if len(skippedSuites) > 0 && len(suites) == 0 { + command.AbortGracefullyWith("All tests skipped! Exiting...") + } + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) { + r.reporterConfig.Succinct = true + } + + t := time.Now() + var endTime time.Time + if r.suiteConfig.Timeout > 0 { + endTime = t.Add(r.suiteConfig.Timeout) + } + + iteration := 0 +OUTER_LOOP: + for { + if !r.flags.WasSet("seed") { + r.suiteConfig.RandomSeed = time.Now().Unix() + } + if r.cliConfig.RandomizeSuites && len(suites) > 1 { + suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed) + } + + opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, r.goFlagsConfig) + + SUITE_LOOP: + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break SUITE_LOOP + } + suites[suiteIdx] = suite + + if r.interruptHandler.Status().Interrupted { + opc.StopAndDrain() + break OUTER_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) { + fmt.Printf("Skipping %s (no test files)\n", suite.Path) + continue SUITE_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suites[suiteIdx].CompilationError.Error()) + if !r.cliConfig.KeepGoing { + opc.StopAndDrain() + } + continue SUITE_LOOP + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing { + suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures + opc.StopAndDrain() + continue SUITE_LOOP + } + + if !endTime.IsZero() { + r.suiteConfig.Timeout = endTime.Sub(time.Now()) + if r.suiteConfig.Timeout <= 0 { + suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout + opc.StopAndDrain() + continue SUITE_LOOP + } + } + + suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs) + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) 
> 0 { + if iteration > 0 { + fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1) + } + break OUTER_LOOP + } + + if r.cliConfig.UntilItFails { + fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1)) + } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat { + fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1) + } else { + break OUTER_LOOP + } + iteration += 1 + } + + internal.Cleanup(r.goFlagsConfig, suites...) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + + fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t)) + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) == 0 { + if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Printf("Test Suite Passed\n") + fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE) + command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE}) + } else { + fmt.Printf("Test Suite Passed\n") + command.Abort(command.AbortDetails{}) + } + } else { + fmt.Fprintln(formatter.ColorableStdOut, "") + if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + fmt.Fprintln(formatter.ColorableStdOut, + internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor))) + } + fmt.Printf("Test Suite Failed\n") + command.Abort(command.AbortDetails{ExitCode: 1}) + } +} + +func orcMessage(iteration int) string { + if iteration < 10 { + return "" + } else if iteration < 30 { + return []string{ + "If at first you succeed...", + "...try, try again.", + "Looking good!", + "Still good...", + "I think your tests are fine....", + "Yep, still passing", + "Oh boy, here I go testin' again!", + "Even the gophers are getting bored", + "Did you try -race?", + "Maybe you should stop now?", + "I'm getting tired...", + "What if I just made you a sandwich?", + "Hit ^C, hit ^C, please hit ^C", + "Make it stop. Please!", + "Come on! Enough is enough!", + "Dave, this conversation can serve no purpose anymore. Goodbye.", + "Just what do you think you're doing, Dave? ", + "I, Sisyphus", + "Insanity: doing the same thing over and over again and expecting different results. -Einstein", + "I guess Einstein never tried to churn butter", + }[iteration-10] + "\n" + } else { + return "No, seriously... 
you can probably stop now.\n" + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go new file mode 100644 index 000000000..7dd294394 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go @@ -0,0 +1,186 @@ +package unfocus + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func BuildUnfocusCommand() command.Command { + return command.Command{ + Name: "unfocus", + Usage: "ginkgo unfocus", + ShortDoc: "Recursively unfocus any focused tests under the current directory", + DocLink: "filtering-specs", + Command: func(_ []string, _ []string) { + unfocusSpecs() + }, + } +} + +func unfocusSpecs() { + fmt.Println("Scanning for focus...") + + goFiles := make(chan string) + go func() { + unfocusDir(goFiles, ".") + close(goFiles) + }() + + const workers = 10 + wg := sync.WaitGroup{} + wg.Add(workers) + + for i := 0; i < workers; i++ { + go func() { + for path := range goFiles { + unfocusFile(path) + } + wg.Done() + }() + } + + wg.Wait() +} + +func unfocusDir(goFiles chan string, path string) { + files, err := os.ReadDir(path) + if err != nil { + fmt.Println(err.Error()) + return + } + + for _, f := range files { + switch { + case f.IsDir() && shouldProcessDir(f.Name()): + unfocusDir(goFiles, filepath.Join(path, f.Name())) + case !f.IsDir() && shouldProcessFile(f.Name()): + goFiles <- filepath.Join(path, f.Name()) + } + } +} + +func shouldProcessDir(basename string) bool { + return basename != "vendor" && !strings.HasPrefix(basename, ".") +} + +func shouldProcessFile(basename string) bool { + return strings.HasSuffix(basename, ".go") +} + +func unfocusFile(path string) { + data, err := os.ReadFile(path) + if err != nil { + fmt.Printf("error reading file '%s': %s\n", path, err.Error()) + return + } + + ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments) + if err != nil { + fmt.Printf("error parsing file '%s': %s\n", path, err.Error()) + return + } + + eliminations := scanForFocus(ast) + if len(eliminations) == 0 { + return + } + + fmt.Printf("...updating %s\n", path) + backup, err := writeBackup(path, data) + if err != nil { + fmt.Printf("error creating backup file: %s\n", err.Error()) + return + } + + if err := updateFile(path, data, eliminations); err != nil { + fmt.Printf("error writing file '%s': %s\n", path, err.Error()) + return + } + + os.Remove(backup) +} + +func writeBackup(path string, data []byte) (string, error) { + t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)) + + if err != nil { + return "", fmt.Errorf("error creating temporary file: %w", err) + } + defer t.Close() + + if _, err := io.Copy(t, bytes.NewReader(data)); err != nil { + return "", fmt.Errorf("error writing to temporary file: %w", err) + } + + return t.Name(), nil +} + +func updateFile(path string, data []byte, eliminations [][]int64) error { + to, err := os.Create(path) + if err != nil { + return fmt.Errorf("error opening file for writing '%s': %w\n", path, err) + } + defer to.Close() + + from := bytes.NewReader(data) + var cursor int64 + for _, eliminationRange := range eliminations { + positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1] + if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil { + return fmt.Errorf("error copying data: %w", err) + } + + cursor = 
positionToEliminate + lengthToEliminate + + if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil { + return fmt.Errorf("error seeking to position in buffer: %w", err) + } + } + + if _, err := io.Copy(to, from); err != nil { + return fmt.Errorf("error copying end data: %w", err) + } + + return nil +} + +func scanForFocus(file *ast.File) (eliminations [][]int64) { + ast.Inspect(file, func(n ast.Node) bool { + if c, ok := n.(*ast.CallExpr); ok { + if i, ok := c.Fun.(*ast.Ident); ok { + if isFocus(i.Name) { + eliminations = append(eliminations, []int64{int64(i.Pos()), 1}) + } + } + } + + if i, ok := n.(*ast.Ident); ok { + if i.Name == "Focus" { + eliminations = append(eliminations, []int64{int64(i.Pos()), 6}) + } + } + + return true + }) + + return eliminations +} + +func isFocus(name string) bool { + switch name { + case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen": + return true + default: + return false + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go new file mode 100644 index 000000000..6c485c5b1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go @@ -0,0 +1,22 @@ +package watch + +import "sort" + +type Delta struct { + ModifiedPackages []string + + NewSuites []*Suite + RemovedSuites []*Suite + modifiedSuites []*Suite +} + +type DescendingByDelta []*Suite + +func (a DescendingByDelta) Len() int { return len(a) } +func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() } + +func (d Delta) ModifiedSuites() []*Suite { + sort.Sort(DescendingByDelta(d.modifiedSuites)) + return d.modifiedSuites +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go new file mode 100644 index 000000000..26418ac62 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go @@ -0,0 +1,75 @@ +package watch + +import ( + "fmt" + + "regexp" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type SuiteErrors map[internal.TestSuite]error + +type DeltaTracker struct { + maxDepth int + watchRegExp *regexp.Regexp + suites map[string]*Suite + packageHashes *PackageHashes +} + +func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker { + return &DeltaTracker{ + maxDepth: maxDepth, + watchRegExp: watchRegExp, + packageHashes: NewPackageHashes(watchRegExp), + suites: map[string]*Suite{}, + } +} + +func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) { + errors = SuiteErrors{} + delta.ModifiedPackages = d.packageHashes.CheckForChanges() + + providedSuitePaths := map[string]bool{} + for _, suite := range suites { + providedSuitePaths[suite.Path] = true + } + + d.packageHashes.StartTrackingUsage() + + for _, suite := range d.suites { + if providedSuitePaths[suite.Suite.Path] { + if suite.Delta() > 0 { + delta.modifiedSuites = append(delta.modifiedSuites, suite) + } + } else { + delta.RemovedSuites = append(delta.RemovedSuites, suite) + } + } + + d.packageHashes.StopTrackingUsageAndPrune() + + for _, suite := range suites { + _, ok := d.suites[suite.Path] + if !ok { + s, err := NewSuite(suite, d.maxDepth, d.packageHashes) + if err != nil { + errors[suite] = err + continue + } + d.suites[suite.Path] = s + delta.NewSuites = append(delta.NewSuites, s) + } + } + + return delta, errors +} + +func (d 
*DeltaTracker) WillRun(suite internal.TestSuite) error { + s, ok := d.suites[suite.Path] + if !ok { + return fmt.Errorf("unknown suite %s", suite.Path) + } + + return s.MarkAsRunAndRecomputedDependencies(d.maxDepth) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go new file mode 100644 index 000000000..f5ddff30f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -0,0 +1,92 @@ +package watch + +import ( + "go/build" + "regexp" +) + +var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) +var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing + +type Dependencies struct { + deps map[string]int +} + +func NewDependencies(path string, maxDepth int) (Dependencies, error) { + d := Dependencies{ + deps: map[string]int{}, + } + + if maxDepth == 0 { + return d, nil + } + + err := d.seedWithDepsForPackageAtPath(path) + if err != nil { + return d, err + } + + for depth := 1; depth < maxDepth; depth++ { + n := len(d.deps) + d.addDepsForDepth(depth) + if n == len(d.deps) { + break + } + } + + return d, nil +} + +func (d Dependencies) Dependencies() map[string]int { + return d.deps +} + +func (d Dependencies) seedWithDepsForPackageAtPath(path string) error { + pkg, err := build.ImportDir(path, 0) + if err != nil { + return err + } + + d.resolveAndAdd(pkg.Imports, 1) + d.resolveAndAdd(pkg.TestImports, 1) + d.resolveAndAdd(pkg.XTestImports, 1) + + delete(d.deps, pkg.Dir) + return nil +} + +func (d Dependencies) addDepsForDepth(depth int) { + for dep, depDepth := range d.deps { + if depDepth == depth { + d.addDepsForDep(dep, depth+1) + } + } +} + +func (d Dependencies) addDepsForDep(dep string, depth int) { + pkg, err := build.ImportDir(dep, 0) + if err != nil { + println(err.Error()) + return + } + d.resolveAndAdd(pkg.Imports, depth) +} + +func (d Dependencies) resolveAndAdd(deps []string, depth int) { + for _, dep := range deps { + pkg, err := build.Import(dep, ".", 0) + if err != nil { + continue + } + if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) { + d.addDepIfNotPresent(pkg.Dir, depth) + } + } +} + +func (d Dependencies) addDepIfNotPresent(dep string, depth int) { + _, ok := d.deps[dep] + if !ok { + d.deps[dep] = depth + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go new file mode 100644 index 000000000..e9f7ec0cb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -0,0 +1,108 @@ +package watch + +import ( + "fmt" + "os" + "regexp" + "time" +) + +var goTestRegExp = regexp.MustCompile(`_test\.go$`) + +type PackageHash struct { + CodeModifiedTime time.Time + TestModifiedTime time.Time + Deleted bool + + path string + codeHash string + testHash string + watchRegExp *regexp.Regexp +} + +func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash { + p := &PackageHash{ + path: path, + watchRegExp: watchRegExp, + } + + p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() + + return p +} + +func (p *PackageHash) CheckForChanges() bool { + codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes() + + if deleted { + if !p.Deleted { + t := time.Now() + p.CodeModifiedTime = t + p.TestModifiedTime = t + } + p.Deleted = 
true + return true + } + + modified := false + p.Deleted = false + + if p.codeHash != codeHash { + p.CodeModifiedTime = codeModifiedTime + modified = true + } + if p.testHash != testHash { + p.TestModifiedTime = testModifiedTime + modified = true + } + + p.codeHash = codeHash + p.testHash = testHash + return modified +} + +func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) { + entries, err := os.ReadDir(p.path) + + if err != nil { + deleted = true + return + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + info, err := entry.Info() + if err != nil { + continue + } + + if goTestRegExp.Match([]byte(info.Name())) { + testHash += p.hashForFileInfo(info) + if info.ModTime().After(testModifiedTime) { + testModifiedTime = info.ModTime() + } + continue + } + + if p.watchRegExp.Match([]byte(info.Name())) { + codeHash += p.hashForFileInfo(info) + if info.ModTime().After(codeModifiedTime) { + codeModifiedTime = info.ModTime() + } + } + } + + testHash += codeHash + if codeModifiedTime.After(testModifiedTime) { + testModifiedTime = codeModifiedTime + } + + return +} + +func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { + return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go new file mode 100644 index 000000000..b4892bebf --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go @@ -0,0 +1,85 @@ +package watch + +import ( + "path/filepath" + "regexp" + "sync" +) + +type PackageHashes struct { + PackageHashes map[string]*PackageHash + usedPaths map[string]bool + watchRegExp *regexp.Regexp + lock *sync.Mutex +} + +func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes { + return &PackageHashes{ + PackageHashes: map[string]*PackageHash{}, + usedPaths: nil, + watchRegExp: watchRegExp, + lock: &sync.Mutex{}, + } +} + +func (p *PackageHashes) CheckForChanges() []string { + p.lock.Lock() + defer p.lock.Unlock() + + modified := []string{} + + for _, packageHash := range p.PackageHashes { + if packageHash.CheckForChanges() { + modified = append(modified, packageHash.path) + } + } + + return modified +} + +func (p *PackageHashes) Add(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + _, ok := p.PackageHashes[path] + if !ok { + p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp) + } + + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) Get(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) StartTrackingUsage() { + p.lock.Lock() + defer p.lock.Unlock() + + p.usedPaths = map[string]bool{} +} + +func (p *PackageHashes) StopTrackingUsageAndPrune() { + p.lock.Lock() + defer p.lock.Unlock() + + for path := range p.PackageHashes { + if !p.usedPaths[path] { + delete(p.PackageHashes, path) + } + } + + p.usedPaths = nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go new file mode 100644 index 000000000..53272df7e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go @@ -0,0 +1,87 @@ +package 
watch + +import ( + "fmt" + "math" + "time" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type Suite struct { + Suite internal.TestSuite + RunTime time.Time + Dependencies Dependencies + + sharedPackageHashes *PackageHashes +} + +func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) { + deps, err := NewDependencies(suite.Path, maxDepth) + if err != nil { + return nil, err + } + + sharedPackageHashes.Add(suite.Path) + for dep := range deps.Dependencies() { + sharedPackageHashes.Add(dep) + } + + return &Suite{ + Suite: suite, + Dependencies: deps, + + sharedPackageHashes: sharedPackageHashes, + }, nil +} + +func (s *Suite) Delta() float64 { + delta := s.delta(s.Suite.Path, true, 0) * 1000 + for dep, depth := range s.Dependencies.Dependencies() { + delta += s.delta(dep, false, depth) + } + return delta +} + +func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error { + s.RunTime = time.Now() + + deps, err := NewDependencies(s.Suite.Path, maxDepth) + if err != nil { + return err + } + + s.sharedPackageHashes.Add(s.Suite.Path) + for dep := range deps.Dependencies() { + s.sharedPackageHashes.Add(dep) + } + + s.Dependencies = deps + + return nil +} + +func (s *Suite) Description() string { + numDeps := len(s.Dependencies.Dependencies()) + pluralizer := "ies" + if numDeps == 1 { + pluralizer = "y" + } + return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer) +} + +func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 { + return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1) +} + +func (s *Suite) dt(packagePath string, includeTests bool) time.Duration { + packageHash := s.sharedPackageHashes.Get(packagePath) + var modifiedTime time.Time + if includeTests { + modifiedTime = packageHash.TestModifiedTime + } else { + modifiedTime = packageHash.CodeModifiedTime + } + + return modifiedTime.Sub(s.RunTime) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go new file mode 100644 index 000000000..83dbeb1e8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go @@ -0,0 +1,190 @@ +package watch + +import ( + "fmt" + "regexp" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildWatchCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + interruptHandler := interrupt_handler.NewInterruptHandler(0, nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "watch", + Flags: flags, + Usage: "ginkgo watch -- ", + ShortDoc: "Watch the passed in and runs their tests whenever changes occur.", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "watching-for-changes", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration 
issues:", errors) + + watcher := &SpecWatcher{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + watcher.WatchSpecs(args, additionalArgs) + }, + } +} + +type SpecWatcher struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth) + deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp)) + delta, errors := deltaTracker.Delta(suites) + + fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))) + for _, suite := range delta.NewSuites { + fmt.Println(" " + suite.Description()) + } + + for suite, err := range errors { + fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err) + } + + if len(suites) == 1 { + w.updateSeed() + w.compileAndRun(suites[0], additionalArgs) + } + + ticker := time.NewTicker(time.Second) + + for { + select { + case <-ticker.C: + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + delta, _ := deltaTracker.Delta(suites) + coloredStream := formatter.ColorableStdOut + + suites = internal.TestSuites{} + + if len(delta.NewSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))) + for _, suite := range delta.NewSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + } + + modifiedSuites := delta.ModifiedSuites() + if len(modifiedSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}")) + for _, pkg := range delta.ModifiedPackages { + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg)) + } + fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites)))) + for _, suite := range modifiedSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + fmt.Fprintln(coloredStream, "") + } + + if len(suites) == 0 { + break + } + + w.updateSeed() + w.computeSuccinctMode(len(suites)) + for idx := range suites { + if w.interruptHandler.Status().Interrupted { + return + } + deltaTracker.WillRun(suites[idx]) + suites[idx] = w.compileAndRun(suites[idx], additionalArgs) + } + color := "{{green}}" + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + color = "{{red}}" + } + fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. 
Resuming watch...{{/}}")) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + case <-w.interruptHandler.Status().Channel: + return + } + } +} + +func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite { + suite = internal.CompileSuite(suite, w.goFlagsConfig) + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + return suite + } + if w.interruptHandler.Status().Interrupted { + return suite + } + suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs) + internal.Cleanup(w.goFlagsConfig, suite) + return suite +} + +func (w *SpecWatcher) computeSuccinctMode(numSuites int) { + if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) { + w.reporterConfig.Succinct = false + return + } + + if w.flags.WasSet("succinct") { + return + } + + if numSuites == 1 { + w.reporterConfig.Succinct = false + } + + if numSuites > 1 { + w.reporterConfig.Succinct = true + } +} + +func (w *SpecWatcher) updateSeed() { + if !w.flags.WasSet("seed") { + w.suiteConfig.RandomSeed = time.Now().Unix() + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go new file mode 100644 index 000000000..ad224bc59 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -0,0 +1,196 @@ +package interrupt_handler + +import ( + "fmt" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/internal/parallel_support" +) + +const TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION = 30 * time.Second +const TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT = 10 +const ABORT_POLLING_INTERVAL = 500 * time.Millisecond +const ABORT_REPEAT_INTERRUPT_DURATION = 30 * time.Second + +type InterruptCause uint + +const ( + InterruptCauseInvalid InterruptCause = iota + + InterruptCauseSignal + InterruptCauseTimeout + InterruptCauseAbortByOtherProcess +) + +func (ic InterruptCause) String() string { + switch ic { + case InterruptCauseSignal: + return "Interrupted by User" + case InterruptCauseTimeout: + return "Interrupted by Timeout" + case InterruptCauseAbortByOtherProcess: + return "Interrupted by Other Ginkgo Process" + } + return "INVALID_INTERRUPT_CAUSE" +} + +type InterruptStatus struct { + Interrupted bool + Channel chan interface{} + Cause InterruptCause +} + +type InterruptHandlerInterface interface { + Status() InterruptStatus + SetInterruptPlaceholderMessage(string) + ClearInterruptPlaceholderMessage() + InterruptMessage() (string, bool) +} + +type InterruptHandler struct { + c chan interface{} + lock *sync.Mutex + interrupted bool + interruptPlaceholderMessage string + interruptCause InterruptCause + client parallel_support.Client + stop chan interface{} +} + +func NewInterruptHandler(timeout time.Duration, client parallel_support.Client) *InterruptHandler { + handler := &InterruptHandler{ + c: make(chan interface{}), + lock: &sync.Mutex{}, + interrupted: false, + stop: make(chan interface{}), + client: client, + } + handler.registerForInterrupts(timeout) + return handler +} + +func (handler *InterruptHandler) Stop() { + close(handler.stop) +} + +func (handler 
*InterruptHandler) registerForInterrupts(timeout time.Duration) { + // os signal handling + signalChannel := make(chan os.Signal, 1) + signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM) + + // timeout handling + var timeoutChannel <-chan time.Time + var timeoutTimer *time.Timer + if timeout > 0 { + timeoutTimer = time.NewTimer(timeout) + timeoutChannel = timeoutTimer.C + } + + // cross-process abort handling + var abortChannel chan bool + if handler.client != nil { + abortChannel = make(chan bool) + go func() { + pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) + for { + select { + case <-pollTicker.C: + if handler.client.ShouldAbort() { + abortChannel <- true + pollTicker.Stop() + return + } + case <-handler.stop: + pollTicker.Stop() + return + } + } + }() + } + + // listen for any interrupt signals + // note that some (timeouts, cross-process aborts) will only trigger once + // for these we set up a ticker to keep interrupting the suite until it ends + // this ensures any `AfterEach` or `AfterSuite`s that get stuck cleaning up + // get interrupted eventually + go func() { + var interruptCause InterruptCause + var repeatChannel <-chan time.Time + var repeatTicker *time.Ticker + for { + select { + case <-signalChannel: + interruptCause = InterruptCauseSignal + case <-timeoutChannel: + interruptCause = InterruptCauseTimeout + repeatInterruptTimeout := timeout / time.Duration(TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT) + if repeatInterruptTimeout > TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION { + repeatInterruptTimeout = TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION + } + timeoutTimer.Stop() + repeatTicker = time.NewTicker(repeatInterruptTimeout) + repeatChannel = repeatTicker.C + case <-abortChannel: + interruptCause = InterruptCauseAbortByOtherProcess + repeatTicker = time.NewTicker(ABORT_REPEAT_INTERRUPT_DURATION) + repeatChannel = repeatTicker.C + case <-repeatChannel: + //do nothing, just interrupt again using the same interruptCause + case <-handler.stop: + if timeoutTimer != nil { + timeoutTimer.Stop() + } + if repeatTicker != nil { + repeatTicker.Stop() + } + signal.Stop(signalChannel) + return + } + handler.lock.Lock() + handler.interruptCause = interruptCause + if handler.interruptPlaceholderMessage != "" { + fmt.Println(handler.interruptPlaceholderMessage) + } + handler.interrupted = true + close(handler.c) + handler.c = make(chan interface{}) + handler.lock.Unlock() + } + }() +} + +func (handler *InterruptHandler) Status() InterruptStatus { + handler.lock.Lock() + defer handler.lock.Unlock() + + return InterruptStatus{ + Interrupted: handler.interrupted, + Channel: handler.c, + Cause: handler.interruptCause, + } +} + +func (handler *InterruptHandler) SetInterruptPlaceholderMessage(message string) { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.interruptPlaceholderMessage = message +} + +func (handler *InterruptHandler) ClearInterruptPlaceholderMessage() { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.interruptPlaceholderMessage = "" +} + +func (handler *InterruptHandler) InterruptMessage() (string, bool) { + handler.lock.Lock() + out := fmt.Sprintf("%s", handler.interruptCause.String()) + defer handler.lock.Unlock() + return out, handler.interruptCause != InterruptCauseAbortByOtherProcess +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go new file mode 100644 index 000000000..bf0de496d --- /dev/null 
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go @@ -0,0 +1,15 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package interrupt_handler + +import ( + "os" + "os/signal" + "syscall" +) + +func SwallowSigQuit() { + c := make(chan os.Signal, 1024) + signal.Notify(c, syscall.SIGQUIT) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go new file mode 100644 index 000000000..fcf8da833 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go @@ -0,0 +1,8 @@ +//go:build windows +// +build windows + +package interrupt_handler + +func SwallowSigQuit() { + //noop +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go new file mode 100644 index 000000000..b417bf5b3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -0,0 +1,70 @@ +package parallel_support + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type BeforeSuiteState struct { + Data []byte + State types.SpecState +} + +type ParallelIndexCounter struct { + Index int +} + +var ErrorGone = fmt.Errorf("gone") +var ErrorFailed = fmt.Errorf("failed") +var ErrorEarly = fmt.Errorf("early") + +var POLLING_INTERVAL = 50 * time.Millisecond + +type Server interface { + Start() + Close() + Address() string + RegisterAlive(node int, alive func() bool) + GetSuiteDone() chan interface{} + GetOutputDestination() io.Writer + SetOutputDestination(io.Writer) +} + +type Client interface { + Connect() bool + Close() error + + PostSuiteWillBegin(report types.Report) error + PostDidRun(report types.SpecReport) error + PostSuiteDidEnd(report types.Report) error + PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error + BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) + BlockUntilNonprimaryProcsHaveFinished() error + BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) + FetchNextCounter() (int, error) + PostAbort() error + ShouldAbort() bool + PostEmitProgressReport(report types.ProgressReport) error + Write(p []byte) (int, error) +} + +func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpServer(parallelTotal, reporter) + } else { + return newRPCServer(parallelTotal, reporter) + } +} + +func NewClient(serverHost string) Client { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpClient(serverHost) + } else { + return newRPCClient(serverHost) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go new file mode 100644 index 000000000..ad9932f2a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -0,0 +1,156 @@ +package parallel_support + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type httpClient struct { + serverHost string +} + +func newHttpClient(serverHost string) *httpClient { + 
return &httpClient{ + serverHost: serverHost, + } +} + +func (client *httpClient) Connect() bool { + resp, err := http.Get(client.serverHost + "/up") + if err != nil { + return false + } + resp.Body.Close() + return resp.StatusCode == http.StatusOK +} + +func (client *httpClient) Close() error { + return nil +} + +func (client *httpClient) post(path string, data interface{}) error { + var body io.Reader + if data != nil { + encoded, err := json.Marshal(data) + if err != nil { + return err + } + body = bytes.NewBuffer(encoded) + } + resp, err := http.Post(client.serverHost+path, "application/json", body) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (client *httpClient) poll(path string, data interface{}) error { + for { + resp, err := http.Get(client.serverHost + path) + if err != nil { + return err + } + if resp.StatusCode == http.StatusTooEarly { + resp.Body.Close() + time.Sleep(POLLING_INTERVAL) + continue + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusGone { + return ErrorGone + } + if resp.StatusCode == http.StatusFailedDependency { + return ErrorFailed + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + if data != nil { + return json.NewDecoder(resp.Body).Decode(data) + } + return nil + } +} + +func (client *httpClient) PostSuiteWillBegin(report types.Report) error { + return client.post("/suite-will-begin", report) +} + +func (client *httpClient) PostDidRun(report types.SpecReport) error { + return client.post("/did-run", report) +} + +func (client *httpClient) PostSuiteDidEnd(report types.Report) error { + return client.post("/suite-did-end", report) +} + +func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.post("/progress-report", report) +} + +func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState := BeforeSuiteState{ + State: state, + Data: data, + } + return client.post("/before-suite-completed", beforeSuiteState) +} + +func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("/before-suite-state", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("/have-nonprimary-procs-finished", nil) +} + +func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("/aggregated-nonprimary-procs-report", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *httpClient) FetchNextCounter() (int, error) { + var counter ParallelIndexCounter + err := client.poll("/counter", &counter) + return counter.Index, err +} + +func (client *httpClient) PostAbort() error { + return client.post("/abort", nil) +} + +func (client *httpClient) ShouldAbort() bool { + err := client.poll("/abort", nil) + if err == ErrorGone { + return true + } + return false +} + +func (client *httpClient) Write(p 
[]byte) (int, error) { + resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8 ", bytes.NewReader(p)) + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return 0, fmt.Errorf("failed to emit output") + } + return len(p), err +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go new file mode 100644 index 000000000..fa3ac682a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ -0,0 +1,223 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package parallel_support + +import ( + "encoding/json" + "io" + "net" + "net/http" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +/* +httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. +*/ +type httpServer struct { + listener net.Listener + handler *ServerHandler +} + +//Create a new server, automatically selecting a port +func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &httpServer{ + listener: listener, + handler: newServerHandler(parallelTotal, reporter), + }, nil +} + +//Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *httpServer) Start() { + httpServer := &http.Server{} + mux := http.NewServeMux() + httpServer.Handler = mux + + //streaming endpoints + mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin) + mux.HandleFunc("/did-run", server.didRun) + mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd) + mux.HandleFunc("/emit-output", server.emitOutput) + mux.HandleFunc("/progress-report", server.emitProgressReport) + + //synchronization endpoints + mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted) + mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState) + mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished) + mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport) + mux.HandleFunc("/counter", server.handleCounter) + mux.HandleFunc("/up", server.handleUp) + mux.HandleFunc("/abort", server.handleAbort) + + go httpServer.Serve(server.listener) +} + +//Stop the server +func (server *httpServer) Close() { + server.listener.Close() +} + +//The address the server can be reached it. Pass this into the `ForwardingReporter`. 
+func (server *httpServer) Address() string { + return "http://" + server.listener.Addr().String() +} + +func (server *httpServer) GetSuiteDone() chan interface{} { + return server.handler.done +} + +func (server *httpServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *httpServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *httpServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} + +// +// Streaming Endpoints +// + +//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { + defer request.Body.Close() + if json.NewDecoder(request.Body).Decode(object) != nil { + writer.WriteHeader(http.StatusBadRequest) + return false + } + return true +} + +func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool { + if err == nil { + return false + } + switch err { + case ErrorEarly: + writer.WriteHeader(http.StatusTooEarly) + case ErrorGone: + writer.WriteHeader(http.StatusGone) + case ErrorFailed: + writer.WriteHeader(http.StatusFailedDependency) + default: + writer.WriteHeader(http.StatusInternalServerError) + } + return true +} + +func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer) +} + +func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) { + var report types.SpecReport + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.DidRun(report, voidReceiver), writer) +} + +func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer) +} + +func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) { + output, err := io.ReadAll(request.Body) + if err != nil { + writer.WriteHeader(http.StatusInternalServerError) + return + } + var n int + server.handleError(server.handler.EmitOutput(output, &n), writer) +} + +func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) { + var report types.ProgressReport + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer) +} + +func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if !server.decode(writer, request, &beforeSuiteState) { + return + } + + server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer) +} + +func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) { + return + } + json.NewEncoder(writer).Encode(beforeSuiteState) +} + +func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) { + if 
server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) { + return + } + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) { + var aggregatedReport types.Report + if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) { + return + } + json.NewEncoder(writer).Encode(aggregatedReport) +} + +func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) { + var n int + if server.handleError(server.handler.Counter(voidSender, &n), writer) { + return + } + json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n}) +} + +func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) { + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) { + if request.Method == "GET" { + var shouldAbort bool + server.handler.ShouldAbort(voidSender, &shouldAbort) + if shouldAbort { + writer.WriteHeader(http.StatusGone) + } else { + writer.WriteHeader(http.StatusOK) + } + } else { + server.handler.Abort(voidSender, voidReceiver) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go new file mode 100644 index 000000000..fe93cc2b9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -0,0 +1,123 @@ +package parallel_support + +import ( + "net/rpc" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type rpcClient struct { + serverHost string + client *rpc.Client +} + +func newRPCClient(serverHost string) *rpcClient { + return &rpcClient{ + serverHost: serverHost, + } +} + +func (client *rpcClient) Connect() bool { + var err error + if client.client != nil { + return true + } + client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/") + if err != nil { + client.client = nil + return false + } + return true +} + +func (client *rpcClient) Close() error { + return client.client.Close() +} + +func (client *rpcClient) poll(method string, data interface{}) error { + for { + err := client.client.Call(method, voidSender, data) + if err == nil { + return nil + } + switch err.Error() { + case ErrorEarly.Error(): + time.Sleep(POLLING_INTERVAL) + case ErrorGone.Error(): + return ErrorGone + case ErrorFailed.Error(): + return ErrorFailed + default: + return err + } + } +} + +func (client *rpcClient) PostSuiteWillBegin(report types.Report) error { + return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver) +} + +func (client *rpcClient) PostDidRun(report types.SpecReport) error { + return client.client.Call("Server.DidRun", report, voidReceiver) +} + +func (client *rpcClient) PostSuiteDidEnd(report types.Report) error { + return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver) +} + +func (client *rpcClient) Write(p []byte) (int, error) { + var n int + err := client.client.Call("Server.EmitOutput", p, &n) + return n, err +} + +func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.client.Call("Server.EmitProgressReport", report, voidReceiver) +} + +func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState := BeforeSuiteState{ + State: state, + Data: data, + } + return 
client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver) +} + +func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("Server.BeforeSuiteState", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver) +} + +func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("Server.AggregatedNonprimaryProcsReport", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *rpcClient) FetchNextCounter() (int, error) { + var counter int + err := client.client.Call("Server.Counter", voidSender, &counter) + return counter, err +} + +func (client *rpcClient) PostAbort() error { + return client.client.Call("Server.Abort", voidSender, voidReceiver) +} + +func (client *rpcClient) ShouldAbort() bool { + var shouldAbort bool + client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort) + return shouldAbort +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go new file mode 100644 index 000000000..2620fd562 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go @@ -0,0 +1,75 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package parallel_support + +import ( + "io" + "net" + "net/http" + "net/rpc" + + "github.com/onsi/ginkgo/v2/reporters" +) + +/* +RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. +*/ +type RPCServer struct { + listener net.Listener + handler *ServerHandler +} + +//Create a new server, automatically selecting a port +func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &RPCServer{ + listener: listener, + handler: newServerHandler(parallelTotal, reporter), + }, nil +} + +//Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *RPCServer) Start() { + rpcServer := rpc.NewServer() + rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server + + httpServer := &http.Server{} + httpServer.Handler = rpcServer + + go httpServer.Serve(server.listener) +} + +//Stop the server +func (server *RPCServer) Close() { + server.listener.Close() +} + +//The address the server can be reached it. Pass this into the `ForwardingReporter`. 
+func (server *RPCServer) Address() string { + return server.listener.Addr().String() +} + +func (server *RPCServer) GetSuiteDone() chan interface{} { + return server.handler.done +} + +func (server *RPCServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *RPCServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *RPCServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go new file mode 100644 index 000000000..7c6e67b96 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -0,0 +1,209 @@ +package parallel_support + +import ( + "io" + "os" + "sync" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type Void struct{} + +var voidReceiver *Void = &Void{} +var voidSender Void + +// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server. +// It handles all the business logic to avoid duplication between the two servers + +type ServerHandler struct { + done chan interface{} + outputDestination io.Writer + reporter reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteState BeforeSuiteState + parallelTotal int + counter int + counterLock *sync.Mutex + shouldAbort bool + + numSuiteDidBegins int + numSuiteDidEnds int + aggregatedReport types.Report + reportHoldingArea []types.SpecReport +} + +func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler { + return &ServerHandler{ + reporter: reporter, + lock: &sync.Mutex{}, + counterLock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + parallelTotal: parallelTotal, + outputDestination: os.Stdout, + done: make(chan interface{}), + } +} + +func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidBegins += 1 + + // all summaries are identical, so it's fine to simply emit the last one of these + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.SuiteWillBegin(report) + + for _, summary := range handler.reportHoldingArea { + handler.reporter.WillRun(summary) + handler.reporter.DidRun(summary) + } + + handler.reportHoldingArea = nil + } + + return nil +} + +func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.WillRun(report) + handler.reporter.DidRun(report) + } else { + handler.reportHoldingArea = append(handler.reportHoldingArea, report) + } + + return nil +} + +func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidEnds += 1 + if handler.numSuiteDidEnds == 1 { + handler.aggregatedReport = report + } else { + handler.aggregatedReport = handler.aggregatedReport.Add(report) + } + + if handler.numSuiteDidEnds == handler.parallelTotal { + handler.reporter.SuiteDidEnd(handler.aggregatedReport) + close(handler.done) + } + + return nil +} + +func (handler *ServerHandler) EmitOutput(output []byte, n *int) error { + 
var err error + *n, err = handler.outputDestination.Write(output) + return err +} + +func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reporter.EmitProgressReport(report) + return nil +} + +func (handler *ServerHandler) registerAlive(proc int, alive func() bool) { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.alives[proc-1] = alive +} + +func (handler *ServerHandler) procIsAlive(proc int) bool { + handler.lock.Lock() + defer handler.lock.Unlock() + alive := handler.alives[proc-1] + if alive == nil { + return true + } + return alive() +} + +func (handler *ServerHandler) haveNonprimaryProcsFinished() bool { + for i := 2; i <= handler.parallelTotal; i++ { + if handler.procIsAlive(i) { + return false + } + } + return true +} + +func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.beforeSuiteState = beforeSuiteState + + return nil +} + +func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.beforeSuiteState.State == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *beforeSuiteState = handler.beforeSuiteState + return nil +} + +func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error { + if handler.haveNonprimaryProcsFinished() { + return nil + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error { + if handler.haveNonprimaryProcsFinished() { + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.numSuiteDidEnds == handler.parallelTotal-1 { + *report = handler.aggregatedReport + return nil + } else { + return ErrorGone + } + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) Counter(_ Void, counter *int) error { + handler.counterLock.Lock() + defer handler.counterLock.Unlock() + *counter = handler.counter + handler.counter++ + return nil +} + +func (handler *ServerHandler) Abort(_ Void, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.shouldAbort = true + return nil +} + +func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error { + handler.lock.Lock() + defer handler.lock.Unlock() + *shouldAbort = handler.shouldAbort + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go new file mode 100644 index 000000000..ccd3317a5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -0,0 +1,555 @@ +/* +Ginkgo's Default Reporter + +A number of command line flags are available to tweak Ginkgo's default output. 
+ +These are documented [here](http://onsi.github.io/ginkgo/#running_tests) +*/ +package reporters + +import ( + "fmt" + "io" + "runtime" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type DefaultReporter struct { + conf types.ReporterConfig + writer io.Writer + + // managing the emission stream + lastChar string + lastEmissionWasDelimiter bool + + // rendering + specDenoter string + retryDenoter string + formatter formatter.Formatter +} + +func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := NewDefaultReporter(conf, writer) + reporter.formatter = formatter.New(formatter.ColorModePassthrough) + + return reporter +} + +func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := &DefaultReporter{ + conf: conf, + writer: writer, + + lastChar: "\n", + lastEmissionWasDelimiter: false, + + specDenoter: "•", + retryDenoter: "↺", + formatter: formatter.NewWithNoColorBool(conf.NoColor), + } + if runtime.GOOS == "windows" { + reporter.specDenoter = "+" + reporter.retryDenoter = "R" + } + + return reporter +} + +/* The Reporter Interface */ + +func (r *DefaultReporter) SuiteWillBegin(report types.Report) { + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) { + r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription)) + if len(report.SuiteLabels) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) + } + r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) + } + } else { + banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath) + r.emitBlock(banner) + bannerWidth := len(banner) + if len(report.SuiteLabels) > 0 { + labels := strings.Join(report.SuiteLabels, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels)) + if len(labels)+2 > bannerWidth { + bannerWidth = len(labels) + 2 + } + } + r.emitBlock(strings.Repeat("=", bannerWidth)) + + out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) + if report.SuiteConfig.RandomizeAllSpecs { + out += r.f(" - will randomize all specs") + } + r.emitBlock(out) + r.emit("\n") + r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal)) + } + } +} + +func (r *DefaultReporter) WillRun(report types.SpecReport) { + if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) { + return + } + + r.emitDelimiter() + indentation := uint(0) + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + r.emitBlock(r.f("{{bold}}[%s] %s{{/}}", report.LeafNodeType.String(), report.LeafNodeText)) + } else { + if len(report.ContainerHierarchyTexts) > 0 { + r.emitBlock(r.cycleJoin(report.ContainerHierarchyTexts, " ")) + indentation = 1 + } + line := r.fi(indentation, "{{bold}}%s{{/}}", report.LeafNodeText) + labels := report.Labels() + if len(labels) > 0 { + line += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels, ", ")) + } + r.emitBlock(line) + } + r.emitBlock(r.fi(indentation, "{{gray}}%s{{/}}", report.LeafNodeLocation)) +} + +func (r *DefaultReporter) DidRun(report 
types.SpecReport) { + v := r.conf.Verbosity() + var header, highlightColor string + includeRuntime, emitGinkgoWriterOutput, stream, denoter := true, true, false, r.specDenoter + succinctLocationBlock := v.Is(types.VerbosityLevelSuccinct) + + hasGW := report.CapturedGinkgoWriterOutput != "" + hasStd := report.CapturedStdOutErr != "" + hasEmittableReports := report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) || (report.ReportEntries.HasVisibility(types.ReportEntryVisibilityFailureOrVerbose) && (!report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose))) + + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + denoter = fmt.Sprintf("[%s]", report.LeafNodeType) + } + + switch report.State { + case types.SpecStatePassed: + highlightColor, succinctLocationBlock = "{{green}}", v.LT(types.VerbosityLevelVerbose) + emitGinkgoWriterOutput = (r.conf.AlwaysEmitGinkgoWriter || v.GTE(types.VerbosityLevelVerbose)) && hasGW + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + if v.GTE(types.VerbosityLevelVerbose) || hasStd || hasEmittableReports { + header = fmt.Sprintf("%s PASSED", denoter) + } else { + return + } + } else { + header, stream = denoter, true + if report.NumAttempts > 1 { + header, stream = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), false + } + if report.RunTime > r.conf.SlowSpecThreshold { + header, stream = fmt.Sprintf("%s [SLOW TEST]", header), false + } + } + if hasStd || emitGinkgoWriterOutput || hasEmittableReports { + stream = false + } + case types.SpecStatePending: + highlightColor = "{{yellow}}" + includeRuntime, emitGinkgoWriterOutput = false, false + if v.Is(types.VerbosityLevelSuccinct) { + header, stream = "P", true + } else { + header, succinctLocationBlock = "P [PENDING]", v.LT(types.VerbosityLevelVeryVerbose) + } + case types.SpecStateSkipped: + highlightColor = "{{cyan}}" + if report.Failure.Message != "" || v.Is(types.VerbosityLevelVeryVerbose) { + header = "S [SKIPPED]" + } else { + header, stream = "S", true + } + case types.SpecStateFailed: + highlightColor, header = "{{red}}", fmt.Sprintf("%s [FAILED]", denoter) + case types.SpecStatePanicked: + highlightColor, header = "{{magenta}}", fmt.Sprintf("%s! [PANICKED]", denoter) + case types.SpecStateInterrupted: + highlightColor, header = "{{orange}}", fmt.Sprintf("%s! [INTERRUPTED]", denoter) + case types.SpecStateAborted: + highlightColor, header = "{{coral}}", fmt.Sprintf("%s! 
[ABORTED]", denoter) + } + + // Emit stream and return + if stream { + r.emit(r.f(highlightColor + header + "{{/}}")) + return + } + + // Emit header + r.emitDelimiter() + if includeRuntime { + header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds()) + } + r.emitBlock(r.f(highlightColor + header + "{{/}}")) + + // Emit Code Location Block + r.emitBlock(r.codeLocationBlock(report, highlightColor, succinctLocationBlock, false)) + + //Emit Stdout/Stderr Output + if hasStd { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Begin Captured StdOut/StdErr Output >>{{/}}")) + r.emitBlock(r.fi(2, "%s", report.CapturedStdOutErr)) + r.emitBlock(r.fi(1, "{{gray}}<< End Captured StdOut/StdErr Output{{/}}")) + } + + //Emit Captured GinkgoWriter Output + if emitGinkgoWriterOutput && hasGW { + r.emitBlock("\n") + r.emitGinkgoWriterOutput(1, report.CapturedGinkgoWriterOutput, 0) + } + + if hasEmittableReports { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Begin Report Entries >>{{/}}")) + reportEntries := report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) + if !report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose) { + reportEntries = report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose) + } + for _, entry := range reportEntries { + r.emitBlock(r.fi(2, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))) + if representation := entry.StringRepresentation(); representation != "" { + r.emitBlock(r.fi(3, representation)) + } + } + r.emitBlock(r.fi(1, "{{gray}}<< End Report Entries{{/}}")) + } + + // Emit Failure Message + if !report.Failure.IsZero() { + r.emitBlock("\n") + r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.Message)) + r.emitBlock(r.fi(1, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.Location)) + if report.Failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.ForwardedPanic)) + } + + if r.conf.FullTrace || report.Failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(1, highlightColor+"Full Stack Trace{{/}}")) + r.emitBlock(r.fi(2, "%s", report.Failure.Location.FullStackTrace)) + } + + if !report.Failure.ProgressReport.IsZero() { + r.emitBlock("\n") + r.emitProgressReport(1, false, report.Failure.ProgressReport) + } + } + + r.emitDelimiter() +} + +func (r *DefaultReporter) SuiteDidEnd(report types.Report) { + failures := report.SpecReports.WithState(types.SpecStateFailureStates) + if len(failures) > 0 { + r.emitBlock("\n\n") + if len(failures) > 1 { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) + } else { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}")) + } + for _, specReport := range failures { + highlightColor, heading := "{{red}}", "[FAIL]" + switch specReport.State { + case types.SpecStatePanicked: + highlightColor, heading = "{{magenta}}", "[PANICKED!]" + case types.SpecStateAborted: + highlightColor, heading = "{{coral}}", "[ABORTED]" + case types.SpecStateInterrupted: + highlightColor, heading = "{{orange}}", "[INTERRUPTED]" + } + locationBlock := r.codeLocationBlock(specReport, highlightColor, true, true) + r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) + } + } + + //summarize the suite + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { + r.emit(r.f(" 
{{green}}SUCCESS!{{/}} %s ", report.RunTime)) + return + } + + r.emitBlock("\n") + color, status := "{{green}}{{bold}}", "SUCCESS!" + if !report.SuiteSucceeded { + color, status = "{{red}}{{bold}}", "FAIL!" + } + + specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes + r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}", + specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates), + report.PreRunStats.TotalSpecs, + report.RunTime.Seconds()), + ) + + switch len(report.SpecialSuiteFailureReasons) { + case 0: + r.emit(r.f(color+"%s{{/}} -- ", status)) + case 1: + r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0])) + default: + r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", "))) + } + + if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 { + r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n")) + } else { + r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed))) + r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates))) + if specs.CountOfFlakedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) + } + r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) + r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) + } +} + +func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { + r.emitDelimiter() + + if report.RunningInParallel { + r.emit(r.f("{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) + } + r.emitProgressReport(0, true, report) + r.emitDelimiter() +} + +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { + if report.LeafNodeText != "" { + if len(report.ContainerHierarchyTexts) > 0 { + r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " "))) + r.emit(" ") + } + r.emit(r.f("{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time.Sub(report.SpecStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation)) + indent += 1 + } + if report.CurrentNodeType != types.NodeTypeInvalid { + r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType)) + if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) { + r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText)) + } + + r.emit(r.f(" (Node Runtime: %s)\n", report.Time.Sub(report.CurrentNodeStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation)) + indent += 1 + } + if report.CurrentStepText != "" { + r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time.Sub(report.CurrentStepStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation)) + indent += 1 + } + + if indent > 0 { + indent -= 1 + } + + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" && (report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)) { + r.emit("\n") + 
r.emitGinkgoWriterOutput(indent, report.CapturedGinkgoWriterOutput, 10) + } + + if !report.SpecGoroutine().IsZero() { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n")) + r.emitGoroutines(indent, report.SpecGoroutine()) + } + + highlightedGoroutines := report.HighlightedGoroutines() + if len(highlightedGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n")) + r.emitGoroutines(indent, highlightedGoroutines...) + } + + otherGoroutines := report.OtherGoroutines() + if len(otherGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) + r.emitGoroutines(indent, otherGoroutines...) + } +} + +func (r *DefaultReporter) emitGinkgoWriterOutput(indent uint, output string, limit int) { + r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) + if limit == 0 { + r.emitBlock(r.fi(indent+1, "%s", output)) + } else { + lines := strings.Split(output, "\n") + if len(lines) <= limit { + r.emitBlock(r.fi(indent+1, "%s", output)) + } else { + r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}")) + for _, line := range lines[len(lines)-limit-1:] { + r.emitBlock(r.fi(indent+1, "%s", line)) + } + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) +} + +func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) { + for idx, g := range goroutines { + color := "{{gray}}" + if g.HasHighlights() { + color = "{{orange}}" + } + r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State)) + for _, fc := range g.Stack { + if fc.Highlight { + r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + r.emitSource(indent+3, fc) + } else { + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + } + } + + if idx+1 < len(goroutines) { + r.emit("\n") + } + } +} + +func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) { + lines := fc.Source + if len(lines) == 0 { + return + } + + lTrim := 100000 + for _, line := range lines { + lTrimLine := len(line) - len(strings.TrimLeft(line, " \t")) + if lTrimLine < lTrim && len(line) > 0 { + lTrim = lTrimLine + } + } + if lTrim == 100000 { + lTrim = 0 + } + + for idx, line := range lines { + if len(line) > lTrim { + line = line[lTrim:] + } + if idx == fc.SourceHighlight { + r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line)) + } else { + r.emit(r.fi(indent, "| %s\n", line)) + } + } +} + +/* Emitting to the writer */ +func (r *DefaultReporter) emit(s string) { + if len(s) > 0 { + r.lastChar = s[len(s)-1:] + r.lastEmissionWasDelimiter = false + r.writer.Write([]byte(s)) + } +} + +func (r *DefaultReporter) emitBlock(s string) { + if len(s) > 0 { + if r.lastChar != "\n" { + r.emit("\n") + } + r.emit(s) + if r.lastChar != "\n" { + r.emit("\n") + } + } +} + +func (r *DefaultReporter) emitDelimiter() { + if r.lastEmissionWasDelimiter { + return + } + r.emitBlock(r.f("{{gray}}%s{{/}}", strings.Repeat("-", 30))) + r.lastEmissionWasDelimiter = true +} + +/* Rendering text */ +func (r *DefaultReporter) f(format string, args ...interface{}) string { + return r.formatter.F(format, args...) +} + +func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string { + return r.formatter.Fi(indentation, format, args...) 
+} + +func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { + return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"}) +} + +func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, succinct bool, usePreciseFailureLocation bool) string { + texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} + texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) + } else { + texts = append(texts, report.LeafNodeText) + } + labels = append(labels, report.LeafNodeLabels) + locations = append(locations, report.LeafNodeLocation) + + failureLocation := report.Failure.FailureNodeLocation + if usePreciseFailureLocation { + failureLocation = report.Failure.Location + } + + switch report.Failure.FailureNodeContext { + case types.FailureNodeAtTopLevel: + texts = append([]string{r.f(highlightColor+"{{bold}}TOP-LEVEL [%s]{{/}}", report.Failure.FailureNodeType)}, texts...) + locations = append([]types.CodeLocation{failureLocation}, locations...) + labels = append([][]string{{}}, labels...) + case types.FailureNodeInContainer: + i := report.Failure.FailureNodeContainerIndex + texts[i] = r.f(highlightColor+"{{bold}}%s [%s]{{/}}", texts[i], report.Failure.FailureNodeType) + locations[i] = failureLocation + case types.FailureNodeIsLeafNode: + i := len(texts) - 1 + texts[i] = r.f(highlightColor+"{{bold}}[%s] %s{{/}}", report.LeafNodeType, report.LeafNodeText) + locations[i] = failureLocation + } + + out := "" + if succinct { + out += r.f("%s", r.cycleJoin(texts, " ")) + flattenedLabels := report.Labels() + if len(flattenedLabels) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) + } + out += "\n" + if usePreciseFailureLocation { + out += r.f("{{gray}}%s{{/}}", failureLocation) + } else { + out += r.f("{{gray}}%s{{/}}", locations[len(locations)-1]) + } + } else { + for i := range texts { + out += r.fi(uint(i), "%s", texts[i]) + if len(labels[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) + } + out += "\n" + out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go new file mode 100644 index 000000000..89d30076b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go @@ -0,0 +1,149 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/config" + "github.com/onsi/ginkgo/v2/types" +) + +// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters +// this has been removed in V2. +// Please read the documentation at: +// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +// for Ginkgo's new behavior and for a migration path. 
+type DeprecatedReporter interface { + SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) + BeforeSuiteDidRun(setupSummary *types.SetupSummary) + SpecWillRun(specSummary *types.SpecSummary) + SpecDidComplete(specSummary *types.SpecSummary) + AfterSuiteDidRun(setupSummary *types.SetupSummary) + SuiteDidEnd(summary *types.SuiteSummary) +} + +// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and +// calls the custom reporter's methods with appropriately transformed data from the V2 report. +// +// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()` +// +// Deprecated: ReportViaDeprecatedReporter method exists to help developer bridge between deprecated V1 functionality and the new +// reporting support in V2. It will be removed in a future minor version of Ginkgo. +func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) { + conf := config.DeprecatedGinkgoConfigType{ + RandomSeed: report.SuiteConfig.RandomSeed, + RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs, + FocusStrings: report.SuiteConfig.FocusStrings, + SkipStrings: report.SuiteConfig.SkipStrings, + FailOnPending: report.SuiteConfig.FailOnPending, + FailFast: report.SuiteConfig.FailFast, + FlakeAttempts: report.SuiteConfig.FlakeAttempts, + EmitSpecProgress: report.SuiteConfig.EmitSpecProgress, + DryRun: report.SuiteConfig.DryRun, + ParallelNode: report.SuiteConfig.ParallelProcess, + ParallelTotal: report.SuiteConfig.ParallelTotal, + SyncHost: report.SuiteConfig.ParallelHost, + StreamHost: report.SuiteConfig.ParallelHost, + } + + summary := &types.DeprecatedSuiteSummary{ + SuiteDescription: report.SuiteDescription, + SuiteID: report.SuitePath, + + NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs, + NumberOfTotalSpecs: report.PreRunStats.TotalSpecs, + NumberOfSpecsThatWillBeRun: report.PreRunStats.SpecsThatWillRun, + } + + reporter.SuiteWillBegin(conf, summary) + + for _, spec := range report.SpecReports { + switch spec.LeafNodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.BeforeSuiteDidRun(setupSummary) + case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.AfterSuiteDidRun(setupSummary) + case types.NodeTypeIt: + componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{} + componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...) + componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...) 
+			componentTexts = append(componentTexts, spec.LeafNodeText)
+			componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation)
+
+			specSummary := &types.DeprecatedSpecSummary{
+				ComponentTexts: componentTexts,
+				ComponentCodeLocations: componentCodeLocations,
+				State: spec.State,
+				RunTime: spec.RunTime,
+				Failure: failureFor(spec),
+				NumberOfSamples: spec.NumAttempts,
+				CapturedOutput: spec.CombinedOutput(),
+				SuiteID: report.SuitePath,
+			}
+			reporter.SpecWillRun(specSummary)
+			reporter.SpecDidComplete(specSummary)
+
+			switch spec.State {
+			case types.SpecStatePending:
+				summary.NumberOfPendingSpecs += 1
+			case types.SpecStateSkipped:
+				summary.NumberOfSkippedSpecs += 1
+			case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted:
+				summary.NumberOfFailedSpecs += 1
+			case types.SpecStatePassed:
+				summary.NumberOfPassedSpecs += 1
+				if spec.NumAttempts > 1 {
+					summary.NumberOfFlakedSpecs += 1
+				}
+			}
+		}
+	}
+
+	summary.SuiteSucceeded = report.SuiteSucceeded
+	summary.RunTime = report.RunTime
+
+	reporter.SuiteDidEnd(summary)
+}
+
+func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure {
+	if spec.Failure.IsZero() {
+		return types.DeprecatedSpecFailure{}
+	}
+
+	index := 0
+	switch spec.Failure.FailureNodeContext {
+	case types.FailureNodeInContainer:
+		index = spec.Failure.FailureNodeContainerIndex
+	case types.FailureNodeAtTopLevel:
+		index = -1
+	case types.FailureNodeIsLeafNode:
+		index = len(spec.ContainerHierarchyTexts) - 1
+		if spec.LeafNodeText != "" {
+			index += 1
+		}
+	}
+
+	return types.DeprecatedSpecFailure{
+		Message: spec.Failure.Message,
+		Location: spec.Failure.Location,
+		ForwardedPanic: spec.Failure.ForwardedPanic,
+		ComponentIndex: index,
+		ComponentType: spec.Failure.FailureNodeType,
+		ComponentCodeLocation: spec.Failure.FailureNodeLocation,
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
new file mode 100644
index 000000000..7f96c450f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
@@ -0,0 +1,60 @@
+package reporters
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+//GenerateJSONReport produces a JSON-formatted report at the passed-in destination
+func GenerateJSONReport(report types.Report, destination string) error {
+	f, err := os.Create(destination)
+	if err != nil {
+		return err
+	}
+	enc := json.NewEncoder(f)
+	enc.SetIndent("", "  ")
+	err = enc.Encode([]types.Report{
+		report,
+	})
+	if err != nil {
+		return err
+	}
+	return f.Close()
+}
+
+//MergeAndCleanupJSONReports produces a single JSON-formatted report at the passed-in destination by merging the JSON-formatted reports provided in sources.
+//It skips over reports that fail to decode but reports on them via the returned messages []string
+func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) {
+	messages := []string{}
+	allReports := []types.Report{}
+	for _, source := range sources {
+		reports := []types.Report{}
+		data, err := os.ReadFile(source)
+		if err != nil {
+			messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+			continue
+		}
+		err = json.Unmarshal(data, &reports)
+		if err != nil {
+			messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
+			continue
+		}
+		os.Remove(source)
+		allReports = append(allReports, reports...)
+	}
+
+	f, err := os.Create(destination)
+	if err != nil {
+		return messages, err
+	}
+	enc := json.NewEncoder(f)
+	enc.SetIndent("", "  ")
+	err = enc.Encode(allReports)
+	if err != nil {
+		return messages, err
+	}
+	return messages, f.Close()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
new file mode 100644
index 000000000..0f165069a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
@@ -0,0 +1,338 @@
+/*
+
+JUnit XML Reporter for Ginkgo
+
+For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
+
+The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/
+
+*/
+
+package reporters
+
+import (
+	"encoding/xml"
+	"fmt"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo/v2/config"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+type JUnitTestSuites struct {
+	XMLName xml.Name `xml:"testsuites"`
+	// Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite)
+	Tests int `xml:"tests,attr"`
+	// Disabled maps onto specs that are pending and/or skipped
+	Disabled int `xml:"disabled,attr"`
+	// Errors maps onto specs that panicked or were interrupted
+	Errors int `xml:"errors,attr"`
+	// Failures maps onto specs that failed
+	Failures int `xml:"failures,attr"`
+	// Time is the time in seconds to execute all test suites
+	Time float64 `xml:"time,attr"`
+
+	//The set of all test suites
+	TestSuites []JUnitTestSuite `xml:"testsuite"`
+}
+
+type JUnitTestSuite struct {
+	// Name maps onto the description of the test suite - maps onto Report.SuiteDescription
+	Name string `xml:"name,attr"`
+	// Package maps onto the absolute path to the test suite - maps onto Report.SuitePath
+	Package string `xml:"package,attr"`
+	// Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite)
+	Tests int `xml:"tests,attr"`
+	// Disabled maps onto specs that are pending
+	Disabled int `xml:"disabled,attr"`
+	// Skipped maps onto specs that are skipped
+	Skipped int `xml:"skipped,attr"`
+	// Errors maps onto specs that panicked or were interrupted
+	Errors int `xml:"errors,attr"`
+	// Failures maps onto specs that failed
+	Failures int `xml:"failures,attr"`
+	// Time is the time in seconds to execute the test suite - maps onto Report.RunTime
+	Time float64 `xml:"time,attr"`
+	// Timestamp is the ISO 8601 formatted start-time of the suite - maps onto Report.StartTime
+	Timestamp string `xml:"timestamp,attr"`
+
+	//Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs
+	Properties JUnitProperties `xml:"properties"`
+
+	//TestCases capture the individual specs
+	TestCases []JUnitTestCase `xml:"testcase"`
+}
+
+type JUnitProperties struct {
+	Properties []JUnitProperty `xml:"property"`
+}
+
+func (jup JUnitProperties) WithName(name string) string {
+	for _, property := range jup.Properties {
+		if property.Name == name {
+			return property.Value
+		}
+	}
+	return ""
+}
+
+type JUnitProperty struct {
+	Name string `xml:"name,attr"`
+	Value string `xml:"value,attr"`
+}
+
+type JUnitTestCase struct {
+	// Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()"
+	Name string `xml:"name,attr"`
+	// Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription
+	Classname string `xml:"classname,attr"`
+	// Status maps onto the string representation of SpecReport.State
+	Status string `xml:"status,attr"`
+	// Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime
+	Time float64 `xml:"time,attr"`
+	//Skipped is populated with a message if the test was skipped or pending
+	Skipped *JUnitSkipped `xml:"skipped,omitempty"`
+	//Error is populated if the test panicked or was interrupted
+	Error *JUnitError `xml:"error,omitempty"`
+	//Failure is populated if the test failed
+	Failure *JUnitFailure `xml:"failure,omitempty"`
+	//SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr
+	SystemOut string `xml:"system-out,omitempty"`
+	//SystemErr maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput
+	SystemErr string `xml:"system-err,omitempty"`
+}
+
+type JUnitSkipped struct {
+	// Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON)
+	Message string `xml:"message,attr"`
+}
+
+type JUnitError struct {
+	//Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted"
+	Message string `xml:"message,attr"`
+	//Type is one of "panicked" or "interrupted"
+	Type string `xml:"type,attr"`
+	//Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines
+	Description string `xml:",chardata"`
+}
+
+type JUnitFailure struct {
+	//Message maps onto the failure message - equivalent to SpecReport.Failure.Message
+	Message string `xml:"message,attr"`
+	//Type is "failed"
+	Type string `xml:"type,attr"`
+	//Description maps onto the location and stack trace of the failure
+	Description string `xml:",chardata"`
+}
+
+func GenerateJUnitReport(report types.Report, dst string) error {
+	suite := JUnitTestSuite{
+		Name: report.SuiteDescription,
+		Package: report.SuitePath,
+		Time: report.RunTime.Seconds(),
+		Timestamp: report.StartTime.Format("2006-01-02T15:04:05"),
+		Properties: JUnitProperties{
+			Properties: []JUnitProperty{
+				{"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)},
+				{"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)},
+				{"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")},
+				{"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))},
+				{"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)},
+				{"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)},
+				{"LabelFilter", report.SuiteConfig.LabelFilter},
+				{"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")},
+				{"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")},
+				{"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
+				{"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")},
+				{"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
+				{"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
+				{"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
+				{"EmitSpecProgress", fmt.Sprintf("%t", report.SuiteConfig.EmitSpecProgress)},
+				{"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
+				{"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)},
+				{"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode},
+			},
+		},
+	}
+	for _, spec := range report.SpecReports {
+		name := 
fmt.Sprintf("[%s]", spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + + test := JUnitTestCase{ + Name: name, + Classname: report.SuiteDescription, + Status: spec.State.String(), + Time: spec.RunTime.Seconds(), + SystemOut: systemOutForUnstructuredReporters(spec), + SystemErr: systemErrForUnstructuredReporters(spec), + } + suite.Tests += 1 + + switch spec.State { + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + test.Skipped = &JUnitSkipped{Message: message} + suite.Skipped += 1 + case types.SpecStatePending: + test.Skipped = &JUnitSkipped{Message: "pending"} + suite.Disabled += 1 + case types.SpecStateFailed: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "failed", + Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + } + suite.Failures += 1 + case types.SpecStateInterrupted: + test.Error = &JUnitError{ + Message: "interrupted", + Type: "interrupted", + Description: interruptDescriptionForUnstructuredReporters(spec.Failure), + } + suite.Errors += 1 + case types.SpecStateAborted: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "aborted", + Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + } + suite.Errors += 1 + case types.SpecStatePanicked: + test.Error = &JUnitError{ + Message: spec.Failure.ForwardedPanic, + Type: "panicked", + Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + } + suite.Errors += 1 + } + + suite.TestCases = append(suite.TestCases, test) + } + + junitReport := JUnitTestSuites{ + Tests: suite.Tests, + Disabled: suite.Disabled + suite.Skipped, + Errors: suite.Errors, + Failures: suite.Failures, + Time: suite.Time, + TestSuites: []JUnitTestSuite{suite}, + } + + f, err := os.Create(dst) + if err != nil { + return err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(junitReport) + + return f.Close() +} + +func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) { + messages := []string{} + mergedReport := JUnitTestSuites{} + for _, source := range sources { + report := JUnitTestSuites{} + f, err := os.Open(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = xml.NewDecoder(f).Decode(&report) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + + mergedReport.Tests += report.Tests + mergedReport.Disabled += report.Disabled + mergedReport.Errors += report.Errors + mergedReport.Failures += report.Failures + mergedReport.Time += report.Time + mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) 
+ } + + f, err := os.Create(dst) + if err != nil { + return messages, err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(mergedReport) + + return messages, f.Close() +} + +func interruptDescriptionForUnstructuredReporters(failure types.Failure) string { + out := &strings.Builder{} + out.WriteString(failure.Message + "\n") + NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(failure.ProgressReport) + return out.String() +} + +func systemErrForUnstructuredReporters(spec types.SpecReport) string { + out := &strings.Builder{} + gw := spec.CapturedGinkgoWriterOutput + cursor := 0 + for _, pr := range spec.ProgressReports { + if cursor < pr.GinkgoWriterOffset { + if pr.GinkgoWriterOffset < len(gw) { + out.WriteString(gw[cursor:pr.GinkgoWriterOffset]) + cursor = pr.GinkgoWriterOffset + } else if cursor < len(gw) { + out.WriteString(gw[cursor:]) + cursor = len(gw) + } + } + NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(pr) + } + + if cursor < len(gw) { + out.WriteString(gw[cursor:]) + } + + return out.String() +} + +func systemOutForUnstructuredReporters(spec types.SpecReport) string { + systemOut := spec.CapturedStdOutErr + if len(spec.ReportEntries) > 0 { + systemOut += "\nReport Entries:\n" + for i, entry := range spec.ReportEntries { + systemOut += fmt.Sprintf("%s\n%s\n%s\n", entry.Name, entry.Location, entry.Time.Format(time.RFC3339Nano)) + if representation := entry.StringRepresentation(); representation != "" { + systemOut += representation + "\n" + } + if i+1 < len(spec.ReportEntries) { + systemOut += "--\n" + } + } + } + return systemOut +} + +// Deprecated JUnitReporter (so folks can still compile their suites) +type JUnitReporter struct{} + +func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} } +func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {} +func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go new file mode 100644 index 000000000..f79f005db --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go @@ -0,0 +1,21 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +type Reporter interface { + SuiteWillBegin(report types.Report) + WillRun(report types.SpecReport) + DidRun(report types.SpecReport) + SuiteDidEnd(report types.Report) + EmitProgressReport(progressReport types.ProgressReport) +} + +type NoopReporter struct{} + +func (n NoopReporter) SuiteWillBegin(report types.Report) {} +func (n NoopReporter) WillRun(report types.SpecReport) {} +func (n NoopReporter) DidRun(report types.SpecReport) {} +func (n NoopReporter) SuiteDidEnd(report types.Report) {} +func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go new file mode 100644 index 000000000..00b038769 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -0,0 +1,97 @@ +/* + +TeamCity 
Reporter for Ginkgo + +Makes use of TeamCity's support for Service Messages +http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests +*/ + +package reporters + +import ( + "fmt" + "os" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +func tcEscape(s string) string { + s = strings.ReplaceAll(s, "|", "||") + s = strings.ReplaceAll(s, "'", "|'") + s = strings.ReplaceAll(s, "\n", "|n") + s = strings.ReplaceAll(s, "\r", "|r") + s = strings.ReplaceAll(s, "[", "|[") + s = strings.ReplaceAll(s, "]", "|]") + return s +} + +func GenerateTeamcityReport(report types.Report, dst string) error { + f, err := os.Create(dst) + if err != nil { + return err + } + + name := report.SuiteDescription + labels := report.SuiteLabels + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) + for _, spec := range report.SpecReports { + name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + + name = tcEscape(name) + fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) + switch spec.State { + case types.SpecStatePending: + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name) + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message)) + case types.SpecStateFailed: + details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStatePanicked: + details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details)) + case types.SpecStateInterrupted: + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted' details='%s']\n", name, tcEscape(interruptDescriptionForUnstructuredReporters(spec.Failure))) + case types.SpecStateAborted: + details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + } + + fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0)) + } + fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription)) + + return f.Close() +} + +func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) { + messages := []string{} + merged := []byte{} + for _, source := range sources { + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue 
+ } + os.Remove(source) + merged = append(merged, data...) + } + return messages, os.WriteFile(dst, merged, 0666) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go new file mode 100644 index 000000000..129109183 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -0,0 +1,92 @@ +package types + +import ( + "fmt" + "os" + "regexp" + "runtime" + "runtime/debug" + "strings" +) + +type CodeLocation struct { + FileName string `json:",omitempty"` + LineNumber int `json:",omitempty"` + FullStackTrace string `json:",omitempty"` + CustomMessage string `json:",omitempty"` +} + +func (codeLocation CodeLocation) String() string { + if codeLocation.CustomMessage != "" { + return codeLocation.CustomMessage + } + return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) +} + +func (codeLocation CodeLocation) ContentsOfLine() string { + if codeLocation.CustomMessage != "" { + return "" + } + contents, err := os.ReadFile(codeLocation.FileName) + if err != nil { + return "" + } + lines := strings.Split(string(contents), "\n") + if len(lines) < codeLocation.LineNumber { + return "" + } + return lines[codeLocation.LineNumber-1] +} + +func NewCustomCodeLocation(message string) CodeLocation { + return CodeLocation{ + CustomMessage: message, + } +} + +func NewCodeLocation(skip int) CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + return CodeLocation{FileName: file, LineNumber: line} +} + +func NewCodeLocationWithStackTrace(skip int) CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + stackTrace := PruneStack(string(debug.Stack()), skip+1) + return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} +} + +// PruneStack removes references to functions that are internal to Ginkgo +// and the Go runtime from a stack string and a certain number of stack entries +// at the beginning of the stack. The stack string has the format +// as returned by runtime/debug.Stack. The leading goroutine information is +// optional and always removed if present. Beware that runtime/debug.Stack +// adds itself as first entry, so typically skip must be >= 1 to remove that +// entry. +func PruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + // Ensure that the even entries are the method names and the + // odd entries the source code information. + if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { + // Ignore "goroutine 29 [running]:" line. + stack = stack[1:] + } + // The "+1" is for skipping over the initial entry, which is + // runtime/debug.Stack() itself. + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" { + prunedStack = stack + } else { + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + // We filter out based on the source code file name. + if !re.Match([]byte(stack[i*2+1])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go new file mode 100644 index 000000000..438c947c8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -0,0 +1,732 @@ +/* +Ginkgo accepts a number of configuration options. 
+These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli) +*/ + +package types + +import ( + "flag" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +// Configuration controlling how an individual test suite is run +type SuiteConfig struct { + RandomSeed int64 + RandomizeAllSpecs bool + FocusStrings []string + SkipStrings []string + FocusFiles []string + SkipFiles []string + LabelFilter string + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + PollProgressAfter time.Duration + PollProgressInterval time.Duration + Timeout time.Duration + OutputInterceptorMode string + SourceRoots []string + + ParallelProcess int + ParallelTotal int + ParallelHost string +} + +func NewDefaultSuiteConfig() SuiteConfig { + return SuiteConfig{ + RandomSeed: time.Now().Unix(), + Timeout: time.Hour, + ParallelProcess: 1, + ParallelTotal: 1, + } +} + +type VerbosityLevel uint + +const ( + VerbosityLevelSuccinct VerbosityLevel = iota + VerbosityLevelNormal + VerbosityLevelVerbose + VerbosityLevelVeryVerbose +) + +func (vl VerbosityLevel) GT(comp VerbosityLevel) bool { + return vl > comp +} + +func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool { + return vl >= comp +} + +func (vl VerbosityLevel) Is(comp VerbosityLevel) bool { + return vl == comp +} + +func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool { + return vl <= comp +} + +func (vl VerbosityLevel) LT(comp VerbosityLevel) bool { + return vl < comp +} + +// Configuration for Ginkgo's reporter +type ReporterConfig struct { + NoColor bool + SlowSpecThreshold time.Duration + Succinct bool + Verbose bool + VeryVerbose bool + FullTrace bool + AlwaysEmitGinkgoWriter bool + + JSONReport string + JUnitReport string + TeamcityReport string +} + +func (rc ReporterConfig) Verbosity() VerbosityLevel { + if rc.Succinct { + return VerbosityLevelSuccinct + } else if rc.Verbose { + return VerbosityLevelVerbose + } else if rc.VeryVerbose { + return VerbosityLevelVeryVerbose + } + return VerbosityLevelNormal +} + +func (rc ReporterConfig) WillGenerateReport() bool { + return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" +} + +func NewDefaultReporterConfig() ReporterConfig { + return ReporterConfig{ + SlowSpecThreshold: 5 * time.Second, + } +} + +// Configuration for the Ginkgo CLI +type CLIConfig struct { + //for build, run, and watch + Recurse bool + SkipPackage string + RequireSuite bool + NumCompilers int + + //for run and watch only + Procs int + Parallel bool + AfterRunHook string + OutputDir string + KeepSeparateCoverprofiles bool + KeepSeparateReports bool + + //for run only + KeepGoing bool + UntilItFails bool + Repeat int + RandomizeSuites bool + + //for watch only + Depth int + WatchRegExp string +} + +func NewDefaultCLIConfig() CLIConfig { + return CLIConfig{ + Depth: 1, + WatchRegExp: `\.go$`, + } +} + +func (g CLIConfig) ComputedProcs() int { + if g.Procs > 0 { + return g.Procs + } + + n := 1 + if g.Parallel { + n = runtime.NumCPU() + if n > 4 { + n = n - 1 + } + } + return n +} + +func (g CLIConfig) ComputedNumCompilers() int { + if g.NumCompilers > 0 { + return g.NumCompilers + } + + return runtime.NumCPU() +} + +// Configuration for the Ginkgo CLI capturing available go flags +// A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags). 
+// More details can be found at: +// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/ +type GoFlagsConfig struct { + //build-time flags for code-and-performance analysis + Race bool + Cover bool + CoverMode string + CoverPkg string + Vet string + + //run-time flags for code-and-performance analysis + BlockProfile string + BlockProfileRate int + CoverProfile string + CPUProfile string + MemProfile string + MemProfileRate int + MutexProfile string + MutexProfileFraction int + Trace string + + //build-time flags for building + A bool + ASMFlags string + BuildMode string + Compiler string + GCCGoFlags string + GCFlags string + InstallSuffix string + LDFlags string + LinkShared bool + Mod string + N bool + ModFile string + ModCacheRW bool + MSan bool + PkgDir string + Tags string + TrimPath bool + ToolExec string + Work bool + X bool +} + +func NewDefaultGoFlagsConfig() GoFlagsConfig { + return GoFlagsConfig{} +} + +func (g GoFlagsConfig) BinaryMustBePreserved() bool { + return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != "" +} + +// Configuration that were deprecated in 2.0 +type deprecatedConfig struct { + DebugParallel bool + NoisySkippings bool + NoisyPendings bool + RegexScansFilePath bool + SlowSpecThresholdWithFLoatUnits float64 + Stream bool + Notify bool +} + +// Flags + +// Flags sections used by both the CLI and the Ginkgo test process +var FlagSections = GinkgoFlagSections{ + {Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"}, + {Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"}, + {Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"}, + {Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism", + Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."}, + {Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"}, + {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"}, + {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"}, + {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"}, + {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"}, + {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests", + Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."}, + {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"}, + {Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"}, + {Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true, + Description: "These flags are inherited from go build. 
Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."}, +} + +// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI +var SuiteConfigFlags = GinkgoFlags{ + {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", + Usage: "The seed used to randomize the spec suite."}, + {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, + + {KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."}, + {KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, + {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + + {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, + {KeyPath: "S.EmitSpecProgress", Name: "progress", SectionKey: "debug", + Usage: "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter."}, + {KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0", + Usage: "Emit node progress reports periodically if node hasn't completed after this duration."}, + {KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s", + Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."}, + {KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug", + Usage: "The location to look for source code when generating progress reports. You can pass multiple --source-root flags."}, + {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h", + Usage: "Test suite fails if it does not complete within the specified timeout."}, + {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none", + Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."}, + + {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", + Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"}, + {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that match this regular expression. 
Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will only run specs in matching files. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."}, + + {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"}, +} + +// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI) +var ParallelConfigFlags = GinkgoFlags{ + {KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "This worker process's (one-indexed) process number. For running specs in parallel."}, + {KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "The total number of worker processes. For running specs in parallel."}, + {KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI", + Usage: "The address for the server that will synchronize the processes."}, +} + +// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI +var ReporterConfigFlags = GinkgoFlags{ + {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, suppress color output in default reporter."}, + {KeyPath: "R.SlowSpecThreshold", Name: "slow-spec-threshold", SectionKey: "output", UsageArgument: "duration", UsageDefaultValue: "5s", + Usage: "Specs that take longer to run than this threshold are flagged as slow by the default reporter."}, + {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", + Usage: "If set, emits more output including GinkgoWriter contents."}, + {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", + Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."}, + {KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output", + Usage: "If set, default reporter prints out a very succinct report"}, + {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output", + Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, + {KeyPath: "R.AlwaysEmitGinkgoWriter", Name: "always-emit-ginkgo-writer", SectionKey: "output", DeprecatedName: "reportPassed", DeprecatedDocLink: "renamed--reportpassed", + Usage: "If set, default reporter prints out captured output of passed tests."}, + + {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", + Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, + {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", 
DeprecatedDocLink: "improved-reporting-infrastructure", + Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."}, + {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output", + Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."}, + + {KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: "changed--slowspecthreshold", + Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"}, + {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, +} + +// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process +func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...) + flags = flags.WithPrefix("ginkgo") + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "D": &deprecatedConfig{}, + } + extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"} + + return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection) +} + +// VetConfig validates that the Ginkgo test process' configuration is sound +func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error { + errors := []error{} + + if flagSet.WasSet("count") || flagSet.WasSet("test.count") { + flag := flagSet.Lookup("count") + if flag == nil { + flag = flagSet.Lookup("test.count") + } + count, err := strconv.Atoi(flag.Value.String()) + if err != nil || count != 1 { + errors = append(errors, GinkgoErrors.InvalidGoFlagCount()) + } + } + + if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") { + errors = append(errors, GinkgoErrors.InvalidGoFlagParallel()) + } + + if suiteConfig.ParallelTotal < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration()) + } + + if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration()) + } + + if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" { + errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration()) + } + + if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 { + errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration()) + } + + if len(suiteConfig.FocusFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.FocusFiles) + if err != nil { + errors = append(errors, err) + } + } + + if len(suiteConfig.SkipFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.SkipFiles) + if err != nil { + errors = append(errors, err) + } + } + + if suiteConfig.LabelFilter != "" { + _, err := ParseLabelFilter(suiteConfig.LabelFilter) + if err != nil { + errors = append(errors, err) + } + } + + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { + case "", "dup", "swap", "none": + default: + errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode)) + } + + numVerbosity := 0 + for _, v := range 
[]bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} { + if v { + numVerbosity++ + } + } + if numVerbosity > 1 { + errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration()) + } + + return errors +} + +// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands +var GinkgoCLISharedFlags = GinkgoFlags{ + {KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites", + Usage: "If set, ginkgo finds and runs test suites under the current directory recursively."}, + {KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "comma-separated list of packages", + Usage: "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored."}, + {KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."}, + {KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)", + Usage: "When running multiple packages, the number of concurrent compilations to perform."}, +} + +// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's build and watch commands (but not run) +var GinkgoCLIRunAndWatchFlags = GinkgoFlags{ + {KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "The number of parallel test nodes to run."}, + {KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "--nodes is an alias for --procs"}, + {KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel", + Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."}, + {KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Command to run when a test suite completes."}, + {KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support", + Usage: "A location to place all generated profiles and reports."}, + {KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis", + Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. The coverprofiles will remain in their respective package directories or in -output-dir if set."}, + {KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output", + Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. 
The reports will remain in their respective package directories or in -output-dir if set."}, + + {KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands +var GinkgoCLIRunFlags = GinkgoFlags{ + {KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, failures from earlier test suites do not prevent later test suites from running."}, + {KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."}, + {KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once", + Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. If set to N the suite will be run N+1 times and will be required to pass each time."}, + {KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize the order in which test suites run."}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands +var GinkgoCLIWatchFlags = GinkgoFlags{ + {KeyPath: "C.Depth", Name: "depth", SectionKey: "watch", + Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."}, + {KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "Regular Expression", + UsageDefaultValue: `\.go$`, + Usage: "Only files matching this regular expression will be watched for changes."}, +} + +// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI +var GoBuildFlags = GinkgoFlags{ + {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", + Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, + {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", + Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, + {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", + Usage: "Enable coverage analysis. 
Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."}, + {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis", + Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`}, + {KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis", + Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."}, + + {KeyPath: "Go.A", Name: "a", SectionKey: "go-build", + Usage: "force rebuilding of packages that are already up-to-date."}, + {KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool asm invocation."}, + {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", + Usage: "build mode to use. See 'go help buildmode' for more."}, + {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", + Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, + {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each gccgo compiler/linker invocation."}, + {KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool compile invocation."}, + {KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build", + Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to raceor, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."}, + {KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool link invocation."}, + {KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build", + Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."}, + {KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build", + Usage: "module download mode to use: readonly, vendor, or mod. See 'go help modules' for more."}, + {KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build", + Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."}, + {KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build", + Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed. 
When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`}, + {KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build", + Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."}, + {KeyPath: "Go.N", Name: "n", SectionKey: "go-build", + Usage: "print the commands but do not run them."}, + {KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build", + Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."}, + {KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build", + Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"}, + {KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build", + Usage: `remove all file system paths from the resulting executable. Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`}, + {KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build", + Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run cmd args /path/to/asm '."}, + {KeyPath: "Go.Work", Name: "work", SectionKey: "go-build", + Usage: "print the name of the temporary work directory and do not delete it when exiting."}, + {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", + Usage: "print the commands."}, +} + +// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI +var GoRunFlags = GinkgoFlags{ + {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis", + Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`}, + {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`}, + {KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a CPU profile to the specified file before exiting. 
Preserves test binary.`}, + {KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`}, + {KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`}, + {KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a mutex contention profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis", + Usage: `if >= 0, calls runtime.SetMutexProfileFraction() Sample 1 in n stack traces of goroutines holding a contended mutex.`}, + {KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis", + Usage: `Write an execution trace to the specified file before exiting.`}, +} + +// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound +// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers +func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) { + errors := []error{} + + if cliConfig.Repeat > 0 && cliConfig.UntilItFails { + errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails()) + } + + //initialize the output directory + if cliConfig.OutputDir != "" { + err := os.MkdirAll(cliConfig.OutputDir, 0777) + if err != nil { + errors = append(errors, err) + } + } + + //ensure cover mode is configured appropriately + if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" { + goFlagsConfig.CoverProfile = "coverprofile.out" + } + + return cliConfig, goFlagsConfig, errors +} + +// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string) ([]string, error) { + // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure + // the built test binary can generate a coverprofile + if goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + + args := []string{"test", "-c", "-o", destination, packageToBuild} + goArgs, err := GenerateFlagArgs( + GoBuildFlags, + map[string]interface{}{ + "Go": &goFlagsConfig, + }, + ) + + if err != nil { + return []string{}, err + } + args = append(args, goArgs...) + return args, nil +} + +// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary +func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) { + var flags GinkgoFlags + flags = SuiteConfigFlags.WithPrefix("ginkgo") + flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...) 
+ flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...) + flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...) + bindings := map[string]interface{}{ + "S": &suiteConfig, + "R": &reporterConfig, + "Go": &goFlagsConfig, + } + + return GenerateFlagArgs(flags, bindings) +} + +// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary +func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) { + flags := GoRunFlags.WithPrefix("test") + bindings := map[string]interface{}{ + "Go": &goFlagsConfig, + } + + args, err := GenerateFlagArgs(flags, bindings) + if err != nil { + return args, err + } + args = append(args, "--test.v") + return args, nil +} + +// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command +func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIRunFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command +func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIWatchFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command +func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags + flags = flags.CopyAppend(GoBuildFlags...) 
+ + bindings := map[string]interface{}{ + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Building Multiple Suites" + } + if flagSections[i].Key == "go-build" { + flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags", + Description: "These flags are inherited from go build."} + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} + +func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package") + + bindings := map[string]interface{}{ + "C": cliConfig, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Fetching Labels from Multiple Suites" + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go new file mode 100644 index 000000000..17922304b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go @@ -0,0 +1,141 @@ +package types + +import ( + "strconv" + "time" +) + +/* + A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters. +*/ + +type SuiteSummary = DeprecatedSuiteSummary +type SetupSummary = DeprecatedSetupSummary +type SpecSummary = DeprecatedSpecSummary +type SpecMeasurement = DeprecatedSpecMeasurement +type SpecComponentType = NodeType +type SpecFailure = DeprecatedSpecFailure + +var ( + SpecComponentTypeInvalid = NodeTypeInvalid + SpecComponentTypeContainer = NodeTypeContainer + SpecComponentTypeIt = NodeTypeIt + SpecComponentTypeBeforeEach = NodeTypeBeforeEach + SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach + SpecComponentTypeAfterEach = NodeTypeAfterEach + SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach + SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite + SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite + SpecComponentTypeAfterSuite = NodeTypeAfterSuite + SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite +) + +type DeprecatedSuiteSummary struct { + SuiteDescription string + SuiteSucceeded bool + SuiteID string + + NumberOfSpecsBeforeParallelization int + NumberOfTotalSpecs int + NumberOfSpecsThatWillBeRun int + NumberOfPendingSpecs int + NumberOfSkippedSpecs int + NumberOfPassedSpecs int + NumberOfFailedSpecs int + NumberOfFlakedSpecs int + RunTime time.Duration +} + +type DeprecatedSetupSummary struct { + ComponentType SpecComponentType + CodeLocation CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + + CapturedOutput string + SuiteID string +} + +type DeprecatedSpecSummary struct { + ComponentTexts []string + ComponentCodeLocations []CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + IsMeasurement bool + NumberOfSamples int + Measurements map[string]*DeprecatedSpecMeasurement + + CapturedOutput string + SuiteID string +} + +func (s DeprecatedSpecSummary) HasFailureState() bool { + return s.State.Is(SpecStateFailureStates) +} + +func (s DeprecatedSpecSummary) TimedOut() bool { + return false +} + +func (s 
DeprecatedSpecSummary) Panicked() bool { + return s.State == SpecStatePanicked +} + +func (s DeprecatedSpecSummary) Failed() bool { + return s.State == SpecStateFailed +} + +func (s DeprecatedSpecSummary) Passed() bool { + return s.State == SpecStatePassed +} + +func (s DeprecatedSpecSummary) Skipped() bool { + return s.State == SpecStateSkipped +} + +func (s DeprecatedSpecSummary) Pending() bool { + return s.State == SpecStatePending +} + +type DeprecatedSpecFailure struct { + Message string + Location CodeLocation + ForwardedPanic string + + ComponentIndex int + ComponentType SpecComponentType + ComponentCodeLocation CodeLocation +} + +type DeprecatedSpecMeasurement struct { + Name string + Info interface{} + Order int + + Results []float64 + + Smallest float64 + Largest float64 + Average float64 + StdDeviation float64 + + SmallestLabel string + LargestLabel string + AverageLabel string + Units string + Precision int +} + +func (s DeprecatedSpecMeasurement) PrecisionFmt() string { + if s.Precision == 0 { + return "%f" + } + + str := strconv.Itoa(s.Precision) + + return "%." + str + "f" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go new file mode 100644 index 000000000..2948dfa0c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -0,0 +1,170 @@ +package types + +import ( + "os" + "strconv" + "strings" + "sync" + "unicode" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type Deprecation struct { + Message string + DocLink string + Version string +} + +type deprecations struct{} + +var Deprecations = deprecations{} + +func (d deprecations) CustomReporter() Deprecation { + return Deprecation{ + Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:", + DocLink: "removed-custom-reporters", + Version: "1.16.0", + } +} + +func (d deprecations) Async() Deprecation { + return Deprecation{ + Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.", + DocLink: "removed-async-testing", + Version: "1.16.0", + } +} + +func (d deprecations) Measure() Deprecation { + return Deprecation{ + Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.", + DocLink: "removed-measure", + Version: "1.16.3", + } +} + +func (d deprecations) ParallelNode() Deprecation { + return Deprecation{ + Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.", + DocLink: "renamed-ginkgoparallelnode", + Version: "1.16.4", + } +} + +func (d deprecations) CurrentGinkgoTestDescription() Deprecation { + return Deprecation{ + Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.", + DocLink: "changed-currentginkgotestdescription", + Version: "1.16.0", + } +} + +func (d deprecations) Convert() Deprecation { + return Deprecation{ + Message: "The convert command is deprecated in Ginkgo V2", + DocLink: "removed-ginkgo-convert", + Version: "1.16.0", + } +} + +func (d deprecations) Blur() Deprecation { + return Deprecation{ + Message: "The blur command is deprecated in Ginkgo V2. 
Use 'ginkgo unfocus' instead.", + Version: "1.16.0", + } +} + +func (d deprecations) Nodot() Deprecation { + return Deprecation{ + Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to references objects and types provided by Ginkgo and Gomega.", + DocLink: "removed-ginkgo-nodot", + Version: "1.16.0", + } +} + +type DeprecationTracker struct { + deprecations map[Deprecation][]CodeLocation + lock *sync.Mutex +} + +func NewDeprecationTracker() *DeprecationTracker { + return &DeprecationTracker{ + deprecations: map[Deprecation][]CodeLocation{}, + lock: &sync.Mutex{}, + } +} + +func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) { + ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS") + if deprecation.Version != "" && ackVersion != "" { + ack := ParseSemVer(ackVersion) + version := ParseSemVer(deprecation.Version) + if ack.GreaterThanOrEqualTo(version) { + return + } + } + + d.lock.Lock() + defer d.lock.Unlock() + if len(cl) == 1 { + d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0]) + } else { + d.deprecations[deprecation] = []CodeLocation{} + } +} + +func (d *DeprecationTracker) DidTrackDeprecations() bool { + d.lock.Lock() + defer d.lock.Unlock() + return len(d.deprecations) > 0 +} + +func (d *DeprecationTracker) DeprecationsReport() string { + d.lock.Lock() + defer d.lock.Unlock() + out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") + out += formatter.F("{{light-yellow}}============================================={{/}}\n") + for deprecation, locations := range d.deprecations { + out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n") + if deprecation.DocLink != "" { + out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink) + } + for _, location := range locations { + out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location) + } + } + out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n") + out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION) + return out +} + +type SemVer struct { + Major int + Minor int + Patch int +} + +func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool { + return (s.Major > o.Major) || + (s.Major == o.Major && s.Minor > o.Minor) || + (s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch) +} + +func ParseSemVer(semver string) SemVer { + out := SemVer{} + semver = strings.TrimFunc(semver, func(r rune) bool { + return !(unicode.IsNumber(r) || r == '.') + }) + components := strings.Split(semver, ".") + if len(components) > 0 { + out.Major, _ = strconv.Atoi(components[0]) + } + if len(components) > 1 { + out.Minor, _ = strconv.Atoi(components[1]) + } + if len(components) > 2 { + out.Patch, _ = strconv.Atoi(components[2]) + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go new file mode 100644 index 000000000..1d96ae028 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go @@ -0,0 +1,43 @@ +package types + +import "encoding/json" + +type EnumSupport struct { + toString map[uint]string + toEnum map[string]uint + maxEnum uint +} + +func NewEnumSupport(toString map[uint]string) EnumSupport { + toEnum, maxEnum := map[string]uint{}, uint(0) + for k, v := range toString { + toEnum[v] = k + 
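A small sketch tying together ParseSemVer and the DeprecationTracker defined above: acknowledging a version at or above a deprecation's version causes TrackDeprecation to drop it. All identifiers come from this package; the acknowledged version string is arbitrary:

    package main

    import (
        "fmt"
        "os"

        "github.com/onsi/ginkgo/v2/types"
    )

    func main() {
        // ParseSemVer trims non-numeric characters from both ends, so "v1.16.4"
        // parses the same as "1.16.4".
        ack := types.ParseSemVer("v1.16.4")
        dep := types.Deprecations.Measure() // deprecated since 1.16.3
        fmt.Println(ack.GreaterThanOrEqualTo(types.ParseSemVer(dep.Version))) // true

        // With the acknowledgement in place, the tracker records nothing.
        os.Setenv("ACK_GINKGO_DEPRECATIONS", "1.16.4")
        tracker := types.NewDeprecationTracker()
        tracker.TrackDeprecation(dep)
        fmt.Println(tracker.DidTrackDeprecations()) // false
    }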
if maxEnum < k { + maxEnum = k + } + } + return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum} +} + +func (es EnumSupport) String(e uint) string { + if e > es.maxEnum { + return es.toString[0] + } + return es.toString[e] +} + +func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) { + var dec string + if err := json.Unmarshal(b, &dec); err != nil { + return 0, err + } + out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway + return out, nil +} + +func (es EnumSupport) MarshJSON(e uint) ([]byte, error) { + if e == 0 || e > es.maxEnum { + return json.Marshal(nil) + } + return json.Marshal(es.toString[e]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go new file mode 100644 index 000000000..6806d6afc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -0,0 +1,543 @@ +package types + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoError struct { + Heading string + Message string + DocLink string + CodeLocation CodeLocation +} + +func (g GinkgoError) Error() string { + out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading) + if (g.CodeLocation != CodeLocation{}) { + contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ") + if contentsOfLine != "" { + out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine) + } + out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation) + } + if g.Message != "" { + out += formatter.Fiw(1, formatter.COLS, g.Message) + out += "\n\n" + } + if g.DocLink != "" { + out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink) + } + + return out +} + +type ginkgoErrors struct{} + +var GinkgoErrors = ginkgoErrors{} + +func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error { + return GinkgoError{ + Heading: "Your Test Panicked", + Message: `When you, or your assertion library, calls Ginkgo's Fail(), +Ginkgo panics to prevent subsequent assertions from running. + +Normally Ginkgo rescues this panic so you shouldn't see it. + +However, if you make an assertion in a goroutine, Ginkgo can't capture the panic. +To circumvent this, you should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. + +Alternatively, you may have made an assertion outside of a Ginkgo +leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to +an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`, + DocLink: "mental-model-how-ginkgo-handles-failure", + CodeLocation: cl, + } +} + +func (g ginkgoErrors) RerunningSuite() error { + return GinkgoError{ + Heading: "Rerunning Suite", + Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`), + DocLink: "repeating-spec-runs-and-managing-flaky-specs", + } +} + +/* Tree construction errors */ + +func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node +to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running. 
+ +To enable randomization and parallelization Ginkgo requires the spec tree +to be fully constructed up front. In practice, this means that you can +only create nodes like {{bold}}[%s]{{/}} at the top-level or within the +body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: "mental-model-how-ginkgo-traverses-the-spec-hierarchy", + } +} + +func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error { + return GinkgoError{ + Heading: "Assertion or Panic detected during tree construction", + Message: formatter.F( + `Ginkgo detected a panic while constructing the spec tree. +You may be trying to make an assertion in the body of a container node +(i.e. {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}). + +Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}}, +{{bold}}It{{/}}, etc. + +{{bold}}Here's the content of the panic that was caught:{{/}} +%v`, caughtPanic), + CodeLocation: cl, + DocLink: "no-assertions-in-container-nodes", + } +} + +func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node. + +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running. + +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node but +you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}. 
+ +Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown), + CodeLocation: cl, + DocLink: "suite-setup-and-cleanup-beforesuite-and-aftersuite", + } +} + +/* Decorator errors */ +func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error { + return GinkgoError{ + Heading: "Invalid Decorator", + Message: formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: Focused and Pending", + Message: formatter.F(`[%s] node was decorated with both Focus and Pending. At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { + return GinkgoError{ + Heading: "Unknown Decorator", + Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. +You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Multiple Functions", + Message: formatter.F(`[%s] node must be passed a single {{bold}}func(){{/}} - but more than one was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Missing Functions", + Message: formatter.F(`[%s] node must be passed a single {{bold}}func(){{/}} - but none was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +/* Ordered Container errors */ +func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Serial Node in Non-Serial Ordered Container", + Message: formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Setup Node not in Ordered Container", + Message: fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers in an ordered container.", nodeType), + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + +/* DeferCleanup errors */ +func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup requires a valid function", + Message: "You must pass DeferCleanup a function to invoke. 
This function must return zero or one values - if it does return, it must return an error. The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup must be called inside a setup or subject node", + Message: "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType), + Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a ReportAfterEach or ReportAfterSuite.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup cannot be called in a DeferCleanup callback", + Message: "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +/* ReportEntry errors */ +func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error { + return GinkgoError{ + Heading: "Too Many ReportEntry Values", + Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}AddGinkgoReport{{/}} outside of a running spec. Make sure you call {{bold}}AddGinkgoReport{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +/* By errors */ +func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "documenting-complex-specs-by", + } +} + +/* FileFilter and SkipFilter errors */ +func (g ginkgoErrors) InvalidFileFilter(filter string) error { + return GinkgoError{ + Heading: "Invalid File Filter", + Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file", "file:lines" where "file" is a regular expression that will match against the file path and lines is a comma-separated list of integers (e.g. file:1,5,7) or line-ranges (e.g. file:1-3,5-9) or both (e.g. 
file:1,5-9)`, filter), + DocLink: "filtering-specs", + } +} + +func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error { + return GinkgoError{ + Heading: "Invalid File Filter Regular Expression", + Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression. regexp.Compile error: %s`, filter, err), + DocLink: "filtering-specs", + } +} + +/* Label Errors */ +func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error { + var message string + if location >= 0 { + for i, r := range input { + if i == location { + message += "{{red}}{{bold}}{{underline}}" + } + message += string(r) + if i == location { + message += "{{/}}" + } + } + } else { + message = input + } + message += "\n" + error + return GinkgoError{ + Heading: "Syntax Error Parsing Label Filter", + Message: message, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Label", + Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain of the following characters: '&|!,()/'", label), + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty Label", + Message: "Labels cannot be empty", + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +/* Table errors */ +func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed multiple functions", + Message: "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. This function will be called for each Entry in the table.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Entry description", + Message: "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed incorrect parameter type", + Message: fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too few parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too many parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), + Message: fmt.Sprintf("The %s expected parameter #%d to be of type <%s> but you passed 
in <%s>", kind, i, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), + Message: fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +/* Parallel Synchronization errors */ + +func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { + return GinkgoError{ + Heading: "Test Report unavailable because a Ginkgo parallel process disappeared", + Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error { + return GinkgoError{ + Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1", + Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. This suite will now abort.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error { + return GinkgoError{ + Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back", + Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.", + } +} + +/* Configuration errors */ + +func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error { + return GinkgoError{ + Heading: "Unknown Type passed to RunSpecs", + Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value), + } +} + +var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead." + +func (g ginkgoErrors) InvalidParallelTotalConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.total must be >= 1", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) InvalidParallelProcessConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) MissingParallelHostConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.host is missing", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) UnreachableParallelHost(host string) error { + return GinkgoError{ + Heading: "Could not reach ginkgo.parallel.host:" + host, + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) DryRunInParallelConfiguration() error { + return GinkgoError{ + Heading: "Ginkgo only performs -dryRun in serial mode.", + Message: "Please try running ginkgo -dryRun again, but without -p or -procs to ensure the suite is running in series.", + } +} + +func (g ginkgoErrors) ConflictingVerbosityConfiguration() error { + return GinkgoError{ + Heading: "Conflicting reporter verbosity settings.", + Message: "You can't set more than one of -v, -vv and --succinct. 
Please pick one!", + } +} + +func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error { + return GinkgoError{ + Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value), + Message: "You must choose one of 'dup', 'swap', or 'none'.", + } +} + +func (g ginkgoErrors) InvalidGoFlagCount() error { + return GinkgoError{ + Heading: "Use of go test -count", + Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed. To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.", + } +} + +func (g ginkgoErrors) InvalidGoFlagParallel() error { + return GinkgoError{ + Heading: "Use of go test -parallel", + Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.", + } +} + +func (g ginkgoErrors) BothRepeatAndUntilItFails() error { + return GinkgoError{ + Heading: "--repeat and --until-it-fails are both set", + Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... which would you like?", + } +} + +/* Stack-Trace parsing errors */ + +func (g ginkgoErrors) FailedToParseStackTrace(message string) error { + return GinkgoError{ + Heading: "Failed to Parse Stack Trace", + Message: message, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go new file mode 100644 index 000000000..cc21df71e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go @@ -0,0 +1,106 @@ +package types + +import ( + "regexp" + "strconv" + "strings" +) + +func ParseFileFilters(filters []string) (FileFilters, error) { + ffs := FileFilters{} + for _, filter := range filters { + ff := FileFilter{} + if filter == "" { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + components := strings.Split(filter, ":") + if !(len(components) == 1 || len(components) == 2) { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + + var err error + ff.Filename, err = regexp.Compile(components[0]) + if err != nil { + return nil, err + } + if len(components) == 2 { + lineFilters := strings.Split(components[1], ",") + for _, lineFilter := range lineFilters { + components := strings.Split(lineFilter, "-") + if len(components) == 1 { + line, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1}) + } else if len(components) == 2 { + line1, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + line2, err := strconv.Atoi(strings.TrimSpace(components[1])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2}) + } else { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + } + } + ffs = append(ffs, ff) + } + return ffs, nil +} + +type FileFilter struct { + Filename *regexp.Regexp + LineFilters LineFilters +} + +func (f FileFilter) Matches(locations []CodeLocation) bool { + for _, location := range locations { + if f.Filename.MatchString(location.FileName) && + f.LineFilters.Matches(location.LineNumber) { + return true + } + + } + return false +} + +type FileFilters []FileFilter + +func (ffs 
FileFilters) Matches(locations []CodeLocation) bool { + for _, ff := range ffs { + if ff.Matches(locations) { + return true + } + } + + return false +} + +type LineFilter struct { + Min int + Max int +} + +func (lf LineFilter) Matches(line int) bool { + return lf.Min <= line && line < lf.Max +} + +type LineFilters []LineFilter + +func (lfs LineFilters) Matches(line int) bool { + if len(lfs) == 0 { + return true + } + + for _, lf := range lfs { + if lf.Matches(line) { + return true + } + } + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go new file mode 100644 index 000000000..9186ae873 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -0,0 +1,489 @@ +package types + +import ( + "flag" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoFlag struct { + Name string + KeyPath string + SectionKey string + + Usage string + UsageArgument string + UsageDefaultValue string + + DeprecatedName string + DeprecatedDocLink string + DeprecatedVersion string + + ExportAs string +} + +type GinkgoFlags []GinkgoFlag + +func (f GinkgoFlags) CopyAppend(flags ...GinkgoFlag) GinkgoFlags { + out := GinkgoFlags{} + out = append(out, f...) + out = append(out, flags...) + return out +} + +func (f GinkgoFlags) WithPrefix(prefix string) GinkgoFlags { + if prefix == "" { + return f + } + out := GinkgoFlags{} + for _, flag := range f { + if flag.Name != "" { + flag.Name = prefix + "." + flag.Name + } + if flag.DeprecatedName != "" { + flag.DeprecatedName = prefix + "." + flag.DeprecatedName + } + if flag.ExportAs != "" { + flag.ExportAs = prefix + "." + flag.ExportAs + } + out = append(out, flag) + } + return out +} + +func (f GinkgoFlags) SubsetWithNames(names ...string) GinkgoFlags { + out := GinkgoFlags{} + for _, flag := range f { + for _, name := range names { + if flag.Name == name { + out = append(out, flag) + break + } + } + } + return out +} + +type GinkgoFlagSection struct { + Key string + Style string + Succinct bool + Heading string + Description string +} + +type GinkgoFlagSections []GinkgoFlagSection + +func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) { + for _, section := range gfs { + if section.Key == key { + return section, true + } + } + + return GinkgoFlagSection{}, false +} + +type GinkgoFlagSet struct { + flags GinkgoFlags + bindings interface{} + + sections GinkgoFlagSections + extraGoFlagsSection GinkgoFlagSection + + flagSet *flag.FlagSet +} + +// Call NewGinkgoFlagSet to create GinkgoFlagSet that creates and binds to it's own *flag.FlagSet +func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + }, nil) +} + +// Call NewGinkgoFlagSet to create GinkgoFlagSet that extends an existing *flag.FlagSet +func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + extraGoFlagsSection: extraGoFlagsSection, + }, flagSet) +} + +func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) { + if flagSet == nil { + f.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + //suppress all output as Ginkgo is responsible for formatting 
usage + f.flagSet.SetOutput(io.Discard) + } else { + f.flagSet = flagSet + //we're piggybacking on an existing flagset (typically go test) so we have limited control + //on user feedback + f.flagSet.Usage = f.substituteUsage + } + + for _, flag := range f.flags { + name := flag.Name + + deprecatedUsage := "[DEPRECATED]" + deprecatedName := flag.DeprecatedName + if name != "" { + deprecatedUsage = fmt.Sprintf("[DEPRECATED] use --%s instead", name) + } else if flag.Usage != "" { + deprecatedUsage += " " + flag.Usage + } + + value, ok := valueAtKeyPath(f.bindings, flag.KeyPath) + if !ok { + return GinkgoFlagSet{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface, addr := value.Interface(), value.Addr().Interface() + + switch value.Type() { + case reflect.TypeOf(string("")): + if name != "" { + f.flagSet.StringVar(addr.(*string), name, iface.(string), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.StringVar(addr.(*string), deprecatedName, iface.(string), deprecatedUsage) + } + case reflect.TypeOf(int64(0)): + if name != "" { + f.flagSet.Int64Var(addr.(*int64), name, iface.(int64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Int64Var(addr.(*int64), deprecatedName, iface.(int64), deprecatedUsage) + } + case reflect.TypeOf(float64(0)): + if name != "" { + f.flagSet.Float64Var(addr.(*float64), name, iface.(float64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Float64Var(addr.(*float64), deprecatedName, iface.(float64), deprecatedUsage) + } + case reflect.TypeOf(int(0)): + if name != "" { + f.flagSet.IntVar(addr.(*int), name, iface.(int), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.IntVar(addr.(*int), deprecatedName, iface.(int), deprecatedUsage) + } + case reflect.TypeOf(bool(true)): + if name != "" { + f.flagSet.BoolVar(addr.(*bool), name, iface.(bool), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.BoolVar(addr.(*bool), deprecatedName, iface.(bool), deprecatedUsage) + } + case reflect.TypeOf(time.Duration(0)): + if name != "" { + f.flagSet.DurationVar(addr.(*time.Duration), name, iface.(time.Duration), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.DurationVar(addr.(*time.Duration), deprecatedName, iface.(time.Duration), deprecatedUsage) + } + + case reflect.TypeOf([]string{}): + if name != "" { + f.flagSet.Var(stringSliceVar{value}, name, flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Var(stringSliceVar{value}, deprecatedName, deprecatedUsage) + } + default: + return GinkgoFlagSet{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return f, nil +} + +func (f GinkgoFlagSet) IsZero() bool { + return f.flagSet == nil +} + +func (f GinkgoFlagSet) WasSet(name string) bool { + found := false + f.flagSet.Visit(func(f *flag.Flag) { + if f.Name == name { + found = true + } + }) + + return found +} + +func (f GinkgoFlagSet) Lookup(name string) *flag.Flag { + return f.flagSet.Lookup(name) +} + +func (f GinkgoFlagSet) Parse(args []string) ([]string, error) { + if f.IsZero() { + return args, nil + } + err := f.flagSet.Parse(args) + if err != nil { + return []string{}, err + } + return f.flagSet.Args(), nil +} + +func (f GinkgoFlagSet) ValidateDeprecations(deprecationTracker *DeprecationTracker) { + if f.IsZero() { + return + } + f.flagSet.Visit(func(flag *flag.Flag) { + for _, ginkgoFlag := range f.flags { + if ginkgoFlag.DeprecatedName != "" && strings.HasSuffix(flag.Name, ginkgoFlag.DeprecatedName) { + message := fmt.Sprintf("--%s is deprecated", ginkgoFlag.DeprecatedName) + if ginkgoFlag.Name != "" { + 
message = fmt.Sprintf("--%s is deprecated, use --%s instead", ginkgoFlag.DeprecatedName, ginkgoFlag.Name) + } else if ginkgoFlag.Usage != "" { + message += " " + ginkgoFlag.Usage + } + + deprecationTracker.TrackDeprecation(Deprecation{ + Message: message, + DocLink: ginkgoFlag.DeprecatedDocLink, + Version: ginkgoFlag.DeprecatedVersion, + }) + } + } + }) +} + +func (f GinkgoFlagSet) Usage() string { + if f.IsZero() { + return "" + } + groupedFlags := map[GinkgoFlagSection]GinkgoFlags{} + ungroupedFlags := GinkgoFlags{} + managedFlags := map[string]bool{} + extraGoFlags := []*flag.Flag{} + + for _, flag := range f.flags { + managedFlags[flag.Name] = true + managedFlags[flag.DeprecatedName] = true + + if flag.Name == "" { + continue + } + + section, ok := f.sections.Lookup(flag.SectionKey) + if ok { + groupedFlags[section] = append(groupedFlags[section], flag) + } else { + ungroupedFlags = append(ungroupedFlags, flag) + } + } + + f.flagSet.VisitAll(func(flag *flag.Flag) { + if !managedFlags[flag.Name] { + extraGoFlags = append(extraGoFlags, flag) + } + }) + + out := "" + for _, section := range f.sections { + flags := groupedFlags[section] + if len(flags) == 0 { + continue + } + out += f.usageForSection(section) + if section.Succinct { + succinctFlags := []string{} + for _, flag := range flags { + if flag.Name != "" { + succinctFlags = append(succinctFlags, fmt.Sprintf("--%s", flag.Name)) + } + } + out += formatter.Fiw(1, formatter.COLS, section.Style+strings.Join(succinctFlags, ", ")+"{{/}}\n") + } else { + for _, flag := range flags { + out += f.usageForFlag(flag, section.Style) + } + } + out += "\n" + } + if len(ungroupedFlags) > 0 { + for _, flag := range ungroupedFlags { + out += f.usageForFlag(flag, "") + } + out += "\n" + } + if len(extraGoFlags) > 0 { + out += f.usageForSection(f.extraGoFlagsSection) + for _, goFlag := range extraGoFlags { + out += f.usageForGoFlag(goFlag) + } + } + + return out +} + +func (f GinkgoFlagSet) substituteUsage() { + fmt.Fprintln(f.flagSet.Output(), f.Usage()) +} + +func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) { + if len(keyPath) == 0 { + return reflect.Value{}, false + } + + val := reflect.ValueOf(root) + components := strings.Split(keyPath, ".") + for _, component := range components { + val = reflect.Indirect(val) + switch val.Kind() { + case reflect.Map: + val = val.MapIndex(reflect.ValueOf(component)) + if val.Kind() == reflect.Interface { + val = reflect.ValueOf(val.Interface()) + } + case reflect.Struct: + val = val.FieldByName(component) + default: + return reflect.Value{}, false + } + if (val == reflect.Value{}) { + return reflect.Value{}, false + } + } + + return val, true +} + +func (f GinkgoFlagSet) usageForSection(section GinkgoFlagSection) string { + out := formatter.F(section.Style + "{{bold}}{{underline}}" + section.Heading + "{{/}}\n") + if section.Description != "" { + out += formatter.Fiw(0, formatter.COLS, section.Description+"\n") + } + return out +} + +func (f GinkgoFlagSet) usageForFlag(flag GinkgoFlag, style string) string { + argument := flag.UsageArgument + defValue := flag.UsageDefaultValue + if argument == "" { + value, _ := valueAtKeyPath(f.bindings, flag.KeyPath) + switch value.Type() { + case reflect.TypeOf(string("")): + argument = "string" + case reflect.TypeOf(int64(0)), reflect.TypeOf(int(0)): + argument = "int" + case reflect.TypeOf(time.Duration(0)): + argument = "duration" + case reflect.TypeOf(float64(0)): + argument = "float" + case reflect.TypeOf([]string{}): + argument = "string" + } 
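A sketch of the reflection-based binding that bindFlagSet and valueAtKeyPath implement above: a key path like "Opt.Retries" walks the bindings map and then the struct fields, and the registered flag writes straight into the bound struct. The options struct, flag names, and arguments are invented for illustration:

    package main

    import (
        "fmt"

        "github.com/onsi/ginkgo/v2/types"
    )

    func main() {
        type options struct {
            Name    string
            Retries int
            Debug   bool
        }
        opts := options{Name: "default"}

        flags := types.GinkgoFlags{
            {KeyPath: "Opt.Name", Name: "name", Usage: "a string option"},
            {KeyPath: "Opt.Retries", Name: "retries", Usage: "an int option"},
            {KeyPath: "Opt.Debug", Name: "debug", Usage: "a bool option"},
        }
        bindings := map[string]interface{}{"Opt": &opts}

        flagSet, err := types.NewGinkgoFlagSet(flags, bindings, types.GinkgoFlagSections{})
        if err != nil {
            panic(err)
        }

        // Parsing stops at the first non-flag argument, which is returned as-is.
        remaining, err := flagSet.Parse([]string{"--name=demo", "--retries=3", "--debug", "positional"})
        if err != nil {
            panic(err)
        }
        fmt.Println(opts, remaining) // {demo 3 true} [positional]
    }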
+ } + if argument != "" { + argument = "[" + argument + "] " + } + if defValue != "" { + defValue = fmt.Sprintf("(default: %s)", defValue) + } + hyphens := "--" + if len(flag.Name) == 1 { + hyphens = "-" + } + + out := formatter.Fi(1, style+"%s%s{{/}} %s{{gray}}%s{{/}}\n", hyphens, flag.Name, argument, defValue) + out += formatter.Fiw(2, formatter.COLS, "{{light-gray}}%s{{/}}\n", flag.Usage) + return out +} + +func (f GinkgoFlagSet) usageForGoFlag(goFlag *flag.Flag) string { + //Taken directly from the flag package + out := fmt.Sprintf(" -%s", goFlag.Name) + name, usage := flag.UnquoteUsage(goFlag) + if len(name) > 0 { + out += " " + name + } + if len(out) <= 4 { + out += "\t" + } else { + out += "\n \t" + } + out += strings.ReplaceAll(usage, "\n", "\n \t") + out += "\n" + return out +} + +type stringSliceVar struct { + slice reflect.Value +} + +func (ssv stringSliceVar) String() string { return "" } +func (ssv stringSliceVar) Set(s string) error { + ssv.slice.Set(reflect.AppendSlice(ssv.slice, reflect.ValueOf([]string{s}))) + return nil +} + +//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { + result := []string{} + for _, flag := range flags { + name := flag.ExportAs + if name == "" { + name = flag.Name + } + if name == "" { + continue + } + + value, ok := valueAtKeyPath(bindings, flag.KeyPath) + if !ok { + return []string{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface := value.Interface() + switch value.Type() { + case reflect.TypeOf(string("")): + if iface.(string) != "" { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + case reflect.TypeOf(int64(0)): + if iface.(int64) != 0 { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(float64(0)): + if iface.(float64) != 0 { + result = append(result, fmt.Sprintf("--%s=%f", name, iface)) + } + case reflect.TypeOf(int(0)): + if iface.(int) != 0 { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(bool(true)): + if iface.(bool) { + result = append(result, fmt.Sprintf("--%s", name)) + } + case reflect.TypeOf(time.Duration(0)): + if iface.(time.Duration) != time.Duration(0) { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + + case reflect.TypeOf([]string{}): + strings := iface.([]string) + for _, s := range strings { + result = append(result, fmt.Sprintf("--%s=%s", name, s)) + } + default: + return []string{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return result, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go new file mode 100644 index 000000000..0403f9e63 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -0,0 +1,347 @@ +package types + +import ( + "fmt" + "regexp" + "strings" +) + +var DEBUG_LABEL_FILTER_PARSING = false + +type LabelFilter func([]string) bool + +func matchLabelAction(label string) LabelFilter { + expected := strings.ToLower(label) + return func(labels []string) bool { + for i := range labels { + if strings.ToLower(labels[i]) == expected { + return true + } + } + return false + } +} + +func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter { + return func(labels []string) bool { + for i := range labels { + if regex.MatchString(labels[i]) { + return true + } + } + return false + } +} + +func 
notAction(filter LabelFilter) LabelFilter { + return func(labels []string) bool { return !filter(labels) } +} + +func andAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) && b(labels) } +} + +func orAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) || b(labels) } +} + +type lfToken uint + +const ( + lfTokenInvalid lfToken = iota + + lfTokenRoot + lfTokenOpenGroup + lfTokenCloseGroup + lfTokenNot + lfTokenAnd + lfTokenOr + lfTokenRegexp + lfTokenLabel + lfTokenEOF +) + +func (l lfToken) Precedence() int { + switch l { + case lfTokenRoot, lfTokenOpenGroup: + return 0 + case lfTokenOr: + return 1 + case lfTokenAnd: + return 2 + case lfTokenNot: + return 3 + } + return -1 +} + +func (l lfToken) String() string { + switch l { + case lfTokenRoot: + return "ROOT" + case lfTokenOpenGroup: + return "(" + case lfTokenCloseGroup: + return ")" + case lfTokenNot: + return "!" + case lfTokenAnd: + return "&&" + case lfTokenOr: + return "||" + case lfTokenRegexp: + return "/regexp/" + case lfTokenLabel: + return "label" + case lfTokenEOF: + return "EOF" + } + return "INVALID" +} + +type treeNode struct { + token lfToken + location int + value string + + parent *treeNode + leftNode *treeNode + rightNode *treeNode +} + +func (tn *treeNode) setRightNode(node *treeNode) { + tn.rightNode = node + node.parent = tn +} + +func (tn *treeNode) setLeftNode(node *treeNode) { + tn.leftNode = node + node.parent = tn +} + +func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode { + if tn.token.Precedence() <= precedence { + return tn + } + return tn.parent.firstAncestorWithPrecedenceLEQ(precedence) +} + +func (tn *treeNode) firstUnmatchedOpenNode() *treeNode { + if tn.token == lfTokenOpenGroup { + return tn + } + if tn.parent == nil { + return nil + } + return tn.parent.firstUnmatchedOpenNode() +} + +func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { + switch tn.token { + case lfTokenOpenGroup: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.") + case lfTokenLabel: + return matchLabelAction(tn.value), nil + case lfTokenRegexp: + re, err := regexp.Compile(tn.value) + if err != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) + } + return matchLabelRegexAction(re), nil + } + + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.") + } + rightLF, err := tn.rightNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenRoot, lfTokenCloseGroup: + return rightLF, nil + case lfTokenNot: + return notAction(rightLF), nil + } + + if tn.leftNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token)) + } + leftLF, err := tn.leftNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenAnd: + return andAction(leftLF, rightLF), nil + case lfTokenOr: + return orAction(leftLF, rightLF), nil + } + + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token)) +} + +func (tn *treeNode) tokenString() string { + out := fmt.Sprintf("<%s", tn.token) + if tn.value != "" { + out += " | " + tn.value + } + out += ">" + return out +} + 
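ParseLabelFilter, defined just below, turns this token tree into a single LabelFilter closure. A minimal sketch of the result in use; the label names are invented:

    package main

    import (
        "fmt"

        "github.com/onsi/ginkgo/v2/types"
    )

    func main() {
        // "&&", "||" (or ","), "!" and "()" combine label matches; matching is
        // case-insensitive, and /.../ terms compile to regular expressions.
        filter, err := types.ParseLabelFilter("(integration || network) && !flaky")
        if err != nil {
            panic(err)
        }

        fmt.Println(filter([]string{"integration"}))          // true
        fmt.Println(filter([]string{"Integration", "flaky"})) // false
        fmt.Println(filter([]string{"unit"}))                 // false
    }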
+func (tn *treeNode) toString(indent int) string { + out := tn.tokenString() + "\n" + if tn.leftNode != nil { + out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", indent), tn.leftNode.toString(indent+1)) + } + if tn.rightNode != nil { + out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1)) + } + return out +} + +func tokenize(input string) func() (*treeNode, error) { + runes, i := []rune(input), 0 + + peekIs := func(r rune) bool { + if i+1 < len(runes) { + return runes[i+1] == r + } + return false + } + + consumeUntil := func(cutset string) (string, int) { + j := i + for ; j < len(runes); j++ { + if strings.IndexRune(cutset, runes[j]) >= 0 { + break + } + } + return string(runes[i:j]), j - i + } + + return func() (*treeNode, error) { + for i < len(runes) && runes[i] == ' ' { + i += 1 + } + + if i >= len(runes) { + return &treeNode{token: lfTokenEOF}, nil + } + + node := &treeNode{location: i} + switch runes[i] { + case '&': + if !peekIs('&') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?") + } + i += 2 + node.token = lfTokenAnd + case '|': + if !peekIs('|') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?") + } + i += 2 + node.token = lfTokenOr + case '!': + i += 1 + node.token = lfTokenNot + case ',': + i += 1 + node.token = lfTokenOr + case '(': + i += 1 + node.token = lfTokenOpenGroup + case ')': + i += 1 + node.token = lfTokenCloseGroup + case '/': + i += 1 + value, n := consumeUntil("/") + i += n + 1 + node.token, node.value = lfTokenRegexp, value + default: + value, n := consumeUntil("&|!,()/") + i += n + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) + } + return node, nil + } +} + +func ParseLabelFilter(input string) (LabelFilter, error) { + if DEBUG_LABEL_FILTER_PARSING { + fmt.Println("\n==============") + fmt.Println("Input: ", input) + fmt.Print("Tokens: ") + } + nextToken := tokenize(input) + + root := &treeNode{token: lfTokenRoot} + current := root +LOOP: + for { + node, err := nextToken() + if err != nil { + return nil, err + } + + if DEBUG_LABEL_FILTER_PARSING { + fmt.Print(node.tokenString() + " ") + } + + switch node.token { + case lfTokenEOF: + break LOOP + case lfTokenLabel, lfTokenRegexp: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. 
You need an operator between them.") + } + current.setRightNode(node) + case lfTokenNot, lfTokenOpenGroup: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token)) + } + current.setRightNode(node) + current = node + case lfTokenAnd, lfTokenOr: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token)) + } + nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence()) + node.setLeftNode(nodeToStealFrom.rightNode) + nodeToStealFrom.setRightNode(node) + current = node + case lfTokenCloseGroup: + firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() + if firstUnmatchedOpenNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.") + } + if firstUnmatchedOpenNode == current && current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.") + } + firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed + current = firstUnmatchedOpenNode.parent + default: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token)) + } + } + if DEBUG_LABEL_FILTER_PARSING { + fmt.Printf("\n Tree:\n%s", root.toString(0)) + } + return root.constructLabelFilter(input) +} + +func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { + out := strings.TrimSpace(label) + if out == "" { + return "", GinkgoErrors.InvalidEmptyLabel(cl) + } + if strings.ContainsAny(out, "&|!,()/") { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + return out, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go new file mode 100644 index 000000000..798bedc03 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -0,0 +1,186 @@ +package types + +import ( + "encoding/json" + "fmt" + "time" +) + +//ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports +//and across the network connection when running in parallel +type ReportEntryValue struct { + raw interface{} //unexported to prevent gob from freaking out about unregistered structs + AsJSON string + Representation string +} + +func WrapEntryValue(value interface{}) ReportEntryValue { + return ReportEntryValue{ + raw: value, + } +} + +func (rev ReportEntryValue) GetRawValue() interface{} { + return rev.raw +} + +func (rev ReportEntryValue) String() string { + if rev.raw == nil { + return "" + } + if colorableStringer, ok := rev.raw.(ColorableStringer); ok { + return colorableStringer.ColorableString() + } + + if stringer, ok := rev.raw.(fmt.Stringer); ok { + return stringer.String() + } + if rev.Representation != "" { + return rev.Representation + } + return fmt.Sprintf("%+v", rev.raw) +} + +func (rev ReportEntryValue) MarshalJSON() ([]byte, error) { + //All this to capture the representation at encoding-time, not creating time + //This way users can Report on pointers and get their final values at reporting-time + out := struct { + AsJSON string + Representation string + }{ + Representation: rev.String(), + } + asJSON, err := json.Marshal(rev.raw) + if err != nil { + return nil, err + } + out.AsJSON = string(asJSON) + + return 
json.Marshal(out) +} + +func (rev *ReportEntryValue) UnmarshalJSON(data []byte) error { + in := struct { + AsJSON string + Representation string + }{} + err := json.Unmarshal(data, &in) + if err != nil { + return err + } + rev.AsJSON = in.AsJSON + rev.Representation = in.Representation + return json.Unmarshal([]byte(in.AsJSON), &(rev.raw)) +} + +func (rev ReportEntryValue) GobEncode() ([]byte, error) { + return rev.MarshalJSON() +} + +func (rev *ReportEntryValue) GobDecode(data []byte) error { + return rev.UnmarshalJSON(data) +} + +// ReportEntry captures information attached to `SpecReport` via `AddReportEntry` +type ReportEntry struct { + // Visibility captures the visibility policy for this ReportEntry + Visibility ReportEntryVisibility + // Time captures the time the AddReportEntry was called + Time time.Time + // Location captures the location of the AddReportEntry call + Location CodeLocation + // Name captures the name of this report + Name string + // Value captures the (optional) object passed into AddReportEntry - this can be + // anything the user wants. The value passed to AddReportEntry is wrapped in a ReportEntryValue to make + // encoding/decoding the value easier. To access the raw value call entry.GetRawValue() + Value ReportEntryValue +} + +// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation. +type ColorableStringer interface { + ColorableString() string +} + +// StringRepresentation() returns the string representation of the value associated with the ReportEntry -- +// if value is nil, empty string is returned +// if value is a `ColorableStringer` then `Value.ColorableString()` is returned +// if value is a `fmt.Stringer` then `Value.String()` is returned +// otherwise the value is formatted with "%+v" +func (entry ReportEntry) StringRepresentation() string { + return entry.Value.String() +} + +// GetRawValue returns the Value object that was passed to AddReportEntry +// If called in-process this will be the same object that was passed into AddReportEntry. +// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be +// a JSON-decoded {}interface. If you want to reconstitute your original object you can decode the entry.Value.AsJSON +// field yourself. +func (entry ReportEntry) GetRawValue() interface{} { + return entry.Value.GetRawValue() +} + + + +type ReportEntries []ReportEntry + +func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool { + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + return true + } + } + return false +} + +func (re ReportEntries) WithVisibility(visibilities ...ReportEntryVisibility) ReportEntries { + out := ReportEntries{} + + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + out = append(out, entry) + } + } + + return out +} + +// ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter +type ReportEntryVisibility uint + +const ( + // Always print out this ReportEntry + ReportEntryVisibilityAlways ReportEntryVisibility = iota + // Only print out this ReportEntry if the spec fails or if the test is run with -v + ReportEntryVisibilityFailureOrVerbose + // Never print out this ReportEntry (note that ReportEntrys are always encoded in machine readable reports (e.g. 
JSON, JUnit, etc.)) + ReportEntryVisibilityNever +) + +var revEnumSupport = NewEnumSupport(map[uint]string{ + uint(ReportEntryVisibilityAlways): "always", + uint(ReportEntryVisibilityFailureOrVerbose): "failure-or-verbose", + uint(ReportEntryVisibilityNever): "never", +}) + +func (rev ReportEntryVisibility) String() string { + return revEnumSupport.String(uint(rev)) +} +func (rev *ReportEntryVisibility) UnmarshalJSON(b []byte) error { + out, err := revEnumSupport.UnmarshJSON(b) + *rev = ReportEntryVisibility(out) + return err +} +func (rev ReportEntryVisibility) MarshalJSON() ([]byte, error) { + return revEnumSupport.MarshJSON(uint(rev)) +} + +func (v ReportEntryVisibility) Is(visibilities ...ReportEntryVisibility) bool { + for _, visibility := range visibilities { + if v == visibility { + return true + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go new file mode 100644 index 000000000..aec9062ed --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -0,0 +1,652 @@ +package types + +import ( + "encoding/json" + "strings" + "time" +) + +const GINKGO_FOCUS_EXIT_CODE = 197 +const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +// Report captures information about a Ginkgo test run +type Report struct { + //SuitePath captures the absolute path to the test suite + SuitePath string + + //SuiteDescription captures the description string passed to the DSL's RunSpecs() function + SuiteDescription string + + //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function + SuiteLabels []string + + //SuiteSucceeded captures the success or failure status of the test run + //If true, the test run is considered successful. + //If false, the test run is considered unsuccessful + SuiteSucceeded bool + + //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programmatically focused + //(i.e an `FIt` or an `FDescribe` + SuiteHasProgrammaticFocus bool + + //SpecialSuiteFailureReasons may contain special failure reasons + //For example, a test suite might be considered "failed" even if none of the individual specs + //have a failure state. For example, if the user has configured --fail-on-pending the test suite + //will have failed if there are pending tests even though all non-pending tests may have passed. In such + //cases, Ginkgo populates SpecialSuiteFailureReasons with a clear message indicating the reason for the failure. + //SpecialSuiteFailureReasons is also populated if the test suite is interrupted by the user. + //Since multiple special failure reasons can occur, this field is a slice. + SpecialSuiteFailureReasons []string + + //PreRunStats contains a set of stats captured before the test run begins. This is primarily used + //by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) + //and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. 
+ PreRunStats PreRunStats + + //StartTime and EndTime capture the start and end time of the test run + StartTime time.Time + EndTime time.Time + + //RunTime captures the duration of the test run + RunTime time.Duration + + //SuiteConfig captures the Ginkgo configuration governing this test run + //SuiteConfig includes information necessary for reproducing an identical test run, + //such as the random seed and any filters applied during the test run + SuiteConfig SuiteConfig + + //SpecReports is a list of all SpecReports generated by this test run + SpecReports SpecReports +} + +//PreRunStats contains a set of stats captured before the test run begins. This is primarily used +//by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) +//and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. +type PreRunStats struct { + TotalSpecs int + SpecsThatWillRun int +} + +//Add is ued by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes +//to form a complete final report. +func (report Report) Add(other Report) Report { + report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded + + if other.StartTime.Before(report.StartTime) { + report.StartTime = other.StartTime + } + + if other.EndTime.After(report.EndTime) { + report.EndTime = other.EndTime + } + + specialSuiteFailureReasons := []string{} + reasonsLookup := map[string]bool{} + for _, reasons := range [][]string{report.SpecialSuiteFailureReasons, other.SpecialSuiteFailureReasons} { + for _, reason := range reasons { + if !reasonsLookup[reason] { + reasonsLookup[reason] = true + specialSuiteFailureReasons = append(specialSuiteFailureReasons, reason) + } + } + } + report.SpecialSuiteFailureReasons = specialSuiteFailureReasons + report.RunTime = report.EndTime.Sub(report.StartTime) + + reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports)) + for i := range report.SpecReports { + reports[i] = report.SpecReports[i] + } + offset := len(report.SpecReports) + for i := range other.SpecReports { + reports[i+offset] = other.SpecReports[i] + } + + report.SpecReports = reports + return report +} + +// SpecReport captures information about a Ginkgo spec. +type SpecReport struct { + // ContainerHierarchyTexts is a slice containing the text strings of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyTexts []string + + // ContainerHierarchyLocations is a slice containing the CodeLocations of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyLocations []CodeLocation + + // ContainerHierarchyLabels is a slice containing the labels of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchyLabels [][]string + + // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text + // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be + // one of the NodeTypesForSuiteLevelNodes node types) + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + + // State captures whether the spec has passed, failed, etc. 
+ State SpecState + + // IsSerial captures whether the spec has the Serial decorator + IsSerial bool + + // IsInOrderedContainer captures whether the spec appears in an Ordered container + IsInOrderedContainer bool + + // StartTime and EndTime capture the start and end time of the spec + StartTime time.Time + EndTime time.Time + + // RunTime captures the duration of the spec + RunTime time.Duration + + // ParallelProcess captures the parallel process that this spec ran on + ParallelProcess int + + //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip()) + //It includes detailed information about the Failure + Failure Failure + + // NumAttempts captures the number of times this Spec was run. Flakey specs can be retried with + // ginkgo --flake-attempts=N + NumAttempts int + + // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter + CapturedGinkgoWriterOutput string + + // CapturedStdOutErr contains text printed to stdout/stderr (when running in parallel) + // This is always empty when running in series or calling CurrentSpecReport() + // It is used internally by Ginkgo's reporter + CapturedStdOutErr string + + // ReportEntries contains any reports added via `AddReportEntry` + ReportEntries ReportEntries + + // ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator + ProgressReports []ProgressReport +} + +func (report SpecReport) MarshalJSON() ([]byte, error) { + //All this to avoid emitting an empty Failure struct in the JSON + out := struct { + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + }{ + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, + } + + if !report.Failure.IsZero() { + out.Failure = &(report.Failure) + } + if len(report.ReportEntries) > 0 { + out.ReportEntries = report.ReportEntries + } + if len(report.ProgressReports) > 0 { + out.ProgressReports = report.ProgressReports + } + + return json.Marshal(out) +} + +// CombinedOutput returns a single string representation of both CapturedStdOutErr and CapturedGinkgoWriterOutput +// Note that both are empty when using CurrentSpecReport() so CurrentSpecReport().CombinedOutput() will always be empty. +// CombinedOutput() is used internally by Ginkgo's reporter. 
+func (report SpecReport) CombinedOutput() string { + if report.CapturedStdOutErr == "" { + return report.CapturedGinkgoWriterOutput + } + if report.CapturedGinkgoWriterOutput == "" { + return report.CapturedStdOutErr + } + return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput +} + +//Failed returns true if report.State is one of the SpecStateFailureStates +// (SpecStateFailed, SpecStatePanicked, SpecStateinterrupted, SpecStateAborted) +func (report SpecReport) Failed() bool { + return report.State.Is(SpecStateFailureStates) +} + +//FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText +func (report SpecReport) FullText() string { + texts := []string{} + texts = append(texts, report.ContainerHierarchyTexts...) + if report.LeafNodeText != "" { + texts = append(texts, report.LeafNodeText) + } + return strings.Join(texts, " ") +} + +//Labels returns a deduped set of all the spec's Labels. +func (report SpecReport) Labels() []string { + out := []string{} + seen := map[string]bool{} + for _, labels := range report.ContainerHierarchyLabels { + for _, label := range labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + for _, label := range report.LeafNodeLabels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + + return out +} + +//MatchesLabelFilter returns true if the spec satisfies the passed in label filter query +func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { + filter, err := ParseLabelFilter(query) + if err != nil { + return false, err + } + return filter(report.Labels()), nil +} + +//FileName() returns the name of the file containing the spec +func (report SpecReport) FileName() string { + return report.LeafNodeLocation.FileName +} + +//LineNumber() returns the line number of the leaf node +func (report SpecReport) LineNumber() int { + return report.LeafNodeLocation.LineNumber +} + +//FailureMessage() returns the failure message (or empty string if the test hasn't failed) +func (report SpecReport) FailureMessage() string { + return report.Failure.Message +} + +//FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) +func (report SpecReport) FailureLocation() CodeLocation { + return report.Failure.Location +} + +type SpecReports []SpecReport + +//WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes +func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { + count := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + count++ + } + } + + out := make(SpecReports, count) + j := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + out[j] = reports[i] + j++ + } + } + return out +} + +//WithState returns the subset of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) WithState(states SpecState) SpecReports { + count := 0 + for i := range reports { + if reports[i].State.Is(states) { + count++ + } + } + + out, j := make(SpecReports, count), 0 + for i := range reports { + if reports[i].State.Is(states) { + out[j] = reports[i] + j++ + } + } + return out +} + +//CountWithState returns the number of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) CountWithState(states SpecState) int { + n := 0 + for i := range reports { + if reports[i].State.Is(states) { + n += 1 + } + } + return n +} + 
+//CountWithState returns the number of SpecReports that passed after multiple attempts +func (reports SpecReports) CountOfFlakedSpecs() int { + n := 0 + for i := range reports { + if reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +// Failure captures failure information for an individual test +type Failure struct { + // Message - the failure message passed into Fail(...). When using a matcher library + // like Gomega, this will contain the failure message generated by Gomega. + // + // Message is also populated if the user has called Skip(...). + Message string + + // Location - the CodeLocation where the failure occurred + // This CodeLocation will include a fully-populated StackTrace + Location CodeLocation + + // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked) + // then ForwardedPanic will be populated with a string representation of the captured panic. + ForwardedPanic string `json:",omitempty"` + + // FailureNodeContext - one of three contexts describing the node in which the failure occurred: + // FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated + // FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated. + // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated. + // + // FailureNodeType will contain the NodeType of the node in which the failure occurred. + // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred. + // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred. 
+ FailureNodeContext FailureNodeContext + FailureNodeType NodeType + FailureNodeLocation CodeLocation + FailureNodeContainerIndex int + + //ProgressReport is populated if the spec was interrupted or timed out + ProgressReport ProgressReport +} + +func (f Failure) IsZero() bool { + return f.Message == "" && (f.Location == CodeLocation{}) +} + +// FailureNodeContext captures the location context for the node containing the failing line of code +type FailureNodeContext uint + +const ( + FailureNodeContextInvalid FailureNodeContext = iota + + FailureNodeIsLeafNode + FailureNodeAtTopLevel + FailureNodeInContainer +) + +var fncEnumSupport = NewEnumSupport(map[uint]string{ + uint(FailureNodeContextInvalid): "INVALID FAILURE NODE CONTEXT", + uint(FailureNodeIsLeafNode): "leaf-node", + uint(FailureNodeAtTopLevel): "top-level", + uint(FailureNodeInContainer): "in-container", +}) + +func (fnc FailureNodeContext) String() string { + return fncEnumSupport.String(uint(fnc)) +} +func (fnc *FailureNodeContext) UnmarshalJSON(b []byte) error { + out, err := fncEnumSupport.UnmarshJSON(b) + *fnc = FailureNodeContext(out) + return err +} +func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) { + return fncEnumSupport.MarshJSON(uint(fnc)) +} + +// SpecState captures the state of a spec +// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)` +type SpecState uint + +const ( + SpecStateInvalid SpecState = 0 + + SpecStatePending SpecState = 1 << iota + SpecStateSkipped + SpecStatePassed + SpecStateFailed + SpecStateAborted + SpecStatePanicked + SpecStateInterrupted +) + +var ssEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecStateInvalid): "INVALID SPEC STATE", + uint(SpecStatePending): "pending", + uint(SpecStateSkipped): "skipped", + uint(SpecStatePassed): "passed", + uint(SpecStateFailed): "failed", + uint(SpecStateAborted): "aborted", + uint(SpecStatePanicked): "panicked", + uint(SpecStateInterrupted): "interrupted", +}) + +func (ss SpecState) String() string { + return ssEnumSupport.String(uint(ss)) +} +func (ss *SpecState) UnmarshalJSON(b []byte) error { + out, err := ssEnumSupport.UnmarshJSON(b) + *ss = SpecState(out) + return err +} +func (ss SpecState) MarshalJSON() ([]byte, error) { + return ssEnumSupport.MarshJSON(uint(ss)) +} + +var SpecStateFailureStates = SpecStateFailed | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted + +func (ss SpecState) Is(states SpecState) bool { + return ss&states != 0 +} + +// ProgressReport captures the progress of the current spec. 
It is, effectively, a structured Ginkgo-aware stack trace +type ProgressReport struct { + ParallelProcess int + RunningInParallel bool + + Time time.Time + + ContainerHierarchyTexts []string + LeafNodeText string + LeafNodeLocation CodeLocation + SpecStartTime time.Time + + CurrentNodeType NodeType + CurrentNodeText string + CurrentNodeLocation CodeLocation + CurrentNodeStartTime time.Time + + CurrentStepText string + CurrentStepLocation CodeLocation + CurrentStepStartTime time.Time + + CapturedGinkgoWriterOutput string `json:",omitempty"` + GinkgoWriterOffset int + + Goroutines []Goroutine +} + +func (pr ProgressReport) IsZero() bool { + return pr.CurrentNodeType == NodeTypeInvalid +} + +func (pr ProgressReport) SpecGoroutine() Goroutine { + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine { + return goroutine + } + } + return Goroutine{} +} + +func (pr ProgressReport) HighlightedGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) OtherGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport { + out := pr + out.CapturedGinkgoWriterOutput = "" + return out +} + +type Goroutine struct { + ID uint64 + State string + Stack []FunctionCall + IsSpecGoroutine bool +} + +func (g Goroutine) IsZero() bool { + return g.ID == 0 +} + +func (g Goroutine) HasHighlights() bool { + for _, fc := range g.Stack { + if fc.Highlight { + return true + } + } + + return false +} + +type FunctionCall struct { + Function string + Filename string + Line int + Highlight bool `json:",omitempty"` + Source []string `json:",omitempty"` + SourceHighlight int `json:",omitempty"` +} + +// NodeType captures the type of a given Ginkgo Node +type NodeType uint + +const ( + NodeTypeInvalid NodeType = 0 + + NodeTypeContainer NodeType = 1 << iota + NodeTypeIt + + NodeTypeBeforeEach + NodeTypeJustBeforeEach + NodeTypeAfterEach + NodeTypeJustAfterEach + + NodeTypeBeforeAll + NodeTypeAfterAll + + NodeTypeBeforeSuite + NodeTypeSynchronizedBeforeSuite + NodeTypeAfterSuite + NodeTypeSynchronizedAfterSuite + + NodeTypeReportBeforeEach + NodeTypeReportAfterEach + NodeTypeReportAfterSuite + + NodeTypeCleanupInvalid + NodeTypeCleanupAfterEach + NodeTypeCleanupAfterAll + NodeTypeCleanupAfterSuite +) + +var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt +var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite + +var ntEnumSupport = NewEnumSupport(map[uint]string{ + uint(NodeTypeInvalid): "INVALID NODE TYPE", + uint(NodeTypeContainer): "Container", + uint(NodeTypeIt): "It", + uint(NodeTypeBeforeEach): "BeforeEach", + uint(NodeTypeJustBeforeEach): "JustBeforeEach", + uint(NodeTypeAfterEach): "AfterEach", + uint(NodeTypeJustAfterEach): "JustAfterEach", + uint(NodeTypeBeforeAll): "BeforeAll", + uint(NodeTypeAfterAll): "AfterAll", + uint(NodeTypeBeforeSuite): "BeforeSuite", + uint(NodeTypeSynchronizedBeforeSuite): "SynchronizedBeforeSuite", + uint(NodeTypeAfterSuite): "AfterSuite", + uint(NodeTypeSynchronizedAfterSuite): 
"SynchronizedAfterSuite", + uint(NodeTypeReportBeforeEach): "ReportBeforeEach", + uint(NodeTypeReportAfterEach): "ReportAfterEach", + uint(NodeTypeReportAfterSuite): "ReportAfterSuite", + uint(NodeTypeCleanupInvalid): "INVALID CLEANUP NODE", + uint(NodeTypeCleanupAfterEach): "DeferCleanup", + uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)", + uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)", +}) + +func (nt NodeType) String() string { + return ntEnumSupport.String(uint(nt)) +} +func (nt *NodeType) UnmarshalJSON(b []byte) error { + out, err := ntEnumSupport.UnmarshJSON(b) + *nt = NodeType(out) + return err +} +func (nt NodeType) MarshalJSON() ([]byte, error) { + return ntEnumSupport.MarshJSON(uint(nt)) +} + +func (nt NodeType) Is(nodeTypes NodeType) bool { + return nt&nodeTypes != 0 +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go new file mode 100644 index 000000000..3974fdc26 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -0,0 +1,3 @@ +package types + +const VERSION = "2.2.0" diff --git a/vendor/github.com/marten-seemann/qpack/.codecov.yml b/vendor/github.com/quic-go/qpack/.codecov.yml similarity index 100% rename from vendor/github.com/marten-seemann/qpack/.codecov.yml rename to vendor/github.com/quic-go/qpack/.codecov.yml diff --git a/vendor/github.com/marten-seemann/qpack/.gitignore b/vendor/github.com/quic-go/qpack/.gitignore similarity index 100% rename from vendor/github.com/marten-seemann/qpack/.gitignore rename to vendor/github.com/quic-go/qpack/.gitignore diff --git a/vendor/github.com/marten-seemann/qpack/.gitmodules b/vendor/github.com/quic-go/qpack/.gitmodules similarity index 100% rename from vendor/github.com/marten-seemann/qpack/.gitmodules rename to vendor/github.com/quic-go/qpack/.gitmodules diff --git a/vendor/github.com/marten-seemann/qpack/.golangci.yml b/vendor/github.com/quic-go/qpack/.golangci.yml similarity index 100% rename from vendor/github.com/marten-seemann/qpack/.golangci.yml rename to vendor/github.com/quic-go/qpack/.golangci.yml diff --git a/vendor/github.com/marten-seemann/qpack/LICENSE.md b/vendor/github.com/quic-go/qpack/LICENSE.md similarity index 100% rename from vendor/github.com/marten-seemann/qpack/LICENSE.md rename to vendor/github.com/quic-go/qpack/LICENSE.md diff --git a/vendor/github.com/marten-seemann/qpack/README.md b/vendor/github.com/quic-go/qpack/README.md similarity index 100% rename from vendor/github.com/marten-seemann/qpack/README.md rename to vendor/github.com/quic-go/qpack/README.md diff --git a/vendor/github.com/marten-seemann/qpack/decoder.go b/vendor/github.com/quic-go/qpack/decoder.go similarity index 100% rename from vendor/github.com/marten-seemann/qpack/decoder.go rename to vendor/github.com/quic-go/qpack/decoder.go diff --git a/vendor/github.com/marten-seemann/qpack/encoder.go b/vendor/github.com/quic-go/qpack/encoder.go similarity index 100% rename from vendor/github.com/marten-seemann/qpack/encoder.go rename to vendor/github.com/quic-go/qpack/encoder.go diff --git a/vendor/github.com/marten-seemann/qpack/header_field.go b/vendor/github.com/quic-go/qpack/header_field.go similarity index 100% rename from vendor/github.com/marten-seemann/qpack/header_field.go rename to vendor/github.com/quic-go/qpack/header_field.go diff --git a/vendor/github.com/marten-seemann/qpack/static_table.go b/vendor/github.com/quic-go/qpack/static_table.go similarity index 100% rename from vendor/github.com/marten-seemann/qpack/static_table.go 
rename to vendor/github.com/quic-go/qpack/static_table.go diff --git a/vendor/github.com/quic-go/qpack/tools.go b/vendor/github.com/quic-go/qpack/tools.go new file mode 100644 index 000000000..8f71eea26 --- /dev/null +++ b/vendor/github.com/quic-go/qpack/tools.go @@ -0,0 +1,5 @@ +//go:build tools + +package qpack + +import _ "github.com/onsi/ginkgo/v2/ginkgo" diff --git a/vendor/github.com/marten-seemann/qpack/varint.go b/vendor/github.com/quic-go/qpack/varint.go similarity index 100% rename from vendor/github.com/marten-seemann/qpack/varint.go rename to vendor/github.com/quic-go/qpack/varint.go diff --git a/vendor/modules.txt b/vendor/modules.txt index 89a7a99ca..6c650a498 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -22,7 +22,7 @@ github.com/Psiphon-Labs/qtls-go1-18 # github.com/Psiphon-Labs/qtls-go1-19 v0.0.0-20221014165721-ed28749db082 ## explicit; go 1.18 github.com/Psiphon-Labs/qtls-go1-19 -# github.com/Psiphon-Labs/quic-go v0.0.0-20230124165616-fe8e9a215a66 +# github.com/Psiphon-Labs/quic-go v0.0.0-20230215230806-9b1ddbf778cc ## explicit; go 1.18 github.com/Psiphon-Labs/quic-go github.com/Psiphon-Labs/quic-go/http3 @@ -93,6 +93,9 @@ github.com/elazarl/goproxy/ext/auth ## explicit; go 1.13 github.com/florianl/go-nfqueue github.com/florianl/go-nfqueue/internal/unix +# github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 +## explicit; go 1.13 +github.com/go-task/slim-sprig # github.com/gobwas/glob v0.2.4-0.20180402141543-f00a7392b439 ## explicit github.com/gobwas/glob @@ -124,6 +127,9 @@ github.com/google/gopacket github.com/google/gopacket/layers # github.com/google/gxui v0.0.0-20151028112939-f85e0a97b3a4 ## explicit +# github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 +## explicit; go 1.14 +github.com/google/pprof/profile # github.com/grafov/m3u8 v0.0.0-20171211212457-6ab8f28ed427 ## explicit github.com/grafov/m3u8 @@ -151,7 +157,6 @@ github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash # github.com/marten-seemann/qpack v0.3.0 ## explicit; go 1.18 -github.com/marten-seemann/qpack # github.com/marusama/semaphore v0.0.0-20171214154724-565ffd8e868a ## explicit github.com/marusama/semaphore @@ -171,6 +176,24 @@ github.com/mitchellh/panicwrap # github.com/mroth/weightedrand v0.4.1 ## explicit; go 1.10 github.com/mroth/weightedrand +# github.com/onsi/ginkgo/v2 v2.2.0 +## explicit; go 1.18 +github.com/onsi/ginkgo/v2/config +github.com/onsi/ginkgo/v2/formatter +github.com/onsi/ginkgo/v2/ginkgo +github.com/onsi/ginkgo/v2/ginkgo/build +github.com/onsi/ginkgo/v2/ginkgo/command +github.com/onsi/ginkgo/v2/ginkgo/generators +github.com/onsi/ginkgo/v2/ginkgo/internal +github.com/onsi/ginkgo/v2/ginkgo/labels +github.com/onsi/ginkgo/v2/ginkgo/outline +github.com/onsi/ginkgo/v2/ginkgo/run +github.com/onsi/ginkgo/v2/ginkgo/unfocus +github.com/onsi/ginkgo/v2/ginkgo/watch +github.com/onsi/ginkgo/v2/internal/interrupt_handler +github.com/onsi/ginkgo/v2/internal/parallel_support +github.com/onsi/ginkgo/v2/reporters +github.com/onsi/ginkgo/v2/types # github.com/oschwald/maxminddb-golang v1.2.1-0.20170901134056-26fe5ace1c70 ## explicit github.com/oschwald/maxminddb-golang @@ -183,6 +206,9 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib +# github.com/quic-go/qpack v0.4.0 +## explicit; go 1.18 +github.com/quic-go/qpack # github.com/refraction-networking/gotapdance v1.2.0 => ./replace/gotapdance ## explicit; go 1.16 
github.com/refraction-networking/gotapdance/ed25519/edwards25519 From 4953f5006a1dde19e9b74454ae29f033d31e9e8f Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Thu, 9 Feb 2023 16:36:01 -0500 Subject: [PATCH 20/44] Add HTTP transforms --- psiphon/common/parameters/parameters.go | 35 ++ psiphon/common/transforms/httpTransformer.go | 249 +++++++++++ .../common/transforms/httpTransformer_test.go | 391 ++++++++++++++++++ psiphon/common/transforms/transforms.go | 2 +- psiphon/config.go | 69 ++++ psiphon/dialParameters.go | 86 +++- psiphon/dialParameters_test.go | 104 +++++ psiphon/meekConn.go | 14 + psiphon/notice.go | 6 + psiphon/server/api.go | 1 + psiphon/serverApi.go | 6 + 11 files changed, 958 insertions(+), 5 deletions(-) create mode 100644 psiphon/common/transforms/httpTransformer.go create mode 100644 psiphon/common/transforms/httpTransformer_test.go diff --git a/psiphon/common/parameters/parameters.go b/psiphon/common/parameters/parameters.go index 0739abe42..8d7ed2a25 100755 --- a/psiphon/common/parameters/parameters.go +++ b/psiphon/common/parameters/parameters.go @@ -239,6 +239,7 @@ const ( ReplayAPIRequestPadding = "ReplayAPIRequestPadding" ReplayHoldOffTunnel = "ReplayHoldOffTunnel" ReplayResolveParameters = "ReplayResolveParameters" + ReplayHTTPTransformerParameters = "ReplayHTTPTransformerParameters" APIRequestUpstreamPaddingMinBytes = "APIRequestUpstreamPaddingMinBytes" APIRequestUpstreamPaddingMaxBytes = "APIRequestUpstreamPaddingMaxBytes" APIRequestDownstreamPaddingMinBytes = "APIRequestDownstreamPaddingMinBytes" @@ -320,6 +321,12 @@ const ( DNSResolverIncludeEDNS0Probability = "DNSResolverIncludeEDNS0Probability" DNSResolverCacheExtensionInitialTTL = "DNSResolverCacheExtensionInitialTTL" DNSResolverCacheExtensionVerifiedTTL = "DNSResolverCacheExtensionVerifiedTTL" + DirectHTTPProtocolTransformSpecs = "DirectHTTPProtocolTransformSpecs" + DirectHTTPProtocolTransformScopedSpecNames = "DirectHTTPProtocolTransformScopedSpecNames" + DirectHTTPProtocolTransformProbability = "DirectHTTPProtocolTransformProbability" + FrontedHTTPProtocolTransformSpecs = "FrontedHTTPProtocolTransformSpecs" + FrontedHTTPProtocolTransformScopedSpecNames = "FrontedHTTPProtocolTransformScopedSpecNames" + FrontedHTTPProtocolTransformProbability = "FrontedHTTPProtocolTransformProbability" ) const ( @@ -574,6 +581,7 @@ var defaultParameters = map[string]struct { ReplayAPIRequestPadding: {value: true}, ReplayHoldOffTunnel: {value: true}, ReplayResolveParameters: {value: true}, + ReplayHTTPTransformerParameters: {value: true}, APIRequestUpstreamPaddingMinBytes: {value: 0, minimum: 0}, APIRequestUpstreamPaddingMaxBytes: {value: 1024, minimum: 0}, @@ -675,6 +683,13 @@ var defaultParameters = map[string]struct { DNSResolverIncludeEDNS0Probability: {value: 0.0, minimum: 0.0}, DNSResolverCacheExtensionInitialTTL: {value: time.Duration(0), minimum: time.Duration(0)}, DNSResolverCacheExtensionVerifiedTTL: {value: time.Duration(0), minimum: time.Duration(0)}, + + DirectHTTPProtocolTransformSpecs: {value: transforms.Specs{}}, + DirectHTTPProtocolTransformScopedSpecNames: {value: transforms.ScopedSpecNames{}}, + DirectHTTPProtocolTransformProbability: {value: 0.0, minimum: 0.0}, + FrontedHTTPProtocolTransformSpecs: {value: transforms.Specs{}}, + FrontedHTTPProtocolTransformScopedSpecNames: {value: transforms.ScopedSpecNames{}}, + FrontedHTTPProtocolTransformProbability: {value: 0.0, minimum: 0.0}, } // IsServerSideOnly indicates if the parameter specified by name is used @@ -856,6 +871,22 @@ func (p *Parameters) Set( 
dnsResolverProtocolTransformSpecs, _ := dnsResolverProtocolTransformSpecsValue.(transforms.Specs) + directHttpProtocolTransformSpecsValue, err := getAppliedValue( + DirectHTTPProtocolTransformSpecs, parameters, applyParameters) + if err != nil { + return nil, errors.Trace(err) + } + directHttpProtocolTransformSpecs, _ := + directHttpProtocolTransformSpecsValue.(transforms.Specs) + + frontedHttpProtocolTransformSpecsValue, err := getAppliedValue( + FrontedHTTPProtocolTransformSpecs, parameters, applyParameters) + if err != nil { + return nil, errors.Trace(err) + } + frontedHttpProtocolTransformSpecs, _ := + frontedHttpProtocolTransformSpecsValue.(transforms.Specs) + for i := 0; i < len(applyParameters); i++ { count := 0 @@ -1031,6 +1062,10 @@ func (p *Parameters) Set( var specs transforms.Specs if name == DNSResolverProtocolTransformScopedSpecNames { specs = dnsResolverProtocolTransformSpecs + } else if name == DirectHTTPProtocolTransformScopedSpecNames { + specs = directHttpProtocolTransformSpecs + } else if name == FrontedHTTPProtocolTransformScopedSpecNames { + specs = frontedHttpProtocolTransformSpecs } err := v.Validate(specs) diff --git a/psiphon/common/transforms/httpTransformer.go b/psiphon/common/transforms/httpTransformer.go new file mode 100644 index 000000000..eb681acb4 --- /dev/null +++ b/psiphon/common/transforms/httpTransformer.go @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package transforms + +import ( + "bytes" + "context" + "math" + "net" + "net/textproto" + "strconv" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" +) + +type HTTPTransformerParameters struct { + // ProtocolTransformName specifies the name associated with + // ProtocolTransformSpec and is used for metrics. + ProtocolTransformName string + + // ProtocolTransformSpec specifies a transform to apply to the HTTP request. + // See: "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/transforms". + // + // HTTP transforms include strategies discovered by the Geneva team, + // https://geneva.cs.umd.edu. + ProtocolTransformSpec Spec + + // ProtocolTransformSeed specifies the seed to use for generating random + // data in the ProtocolTransformSpec transform. To replay a transform, + // specify the same seed. + ProtocolTransformSeed *prng.Seed +} + +const ( + // httpTransformerReadHeader HTTPTransformer is waiting to finish reading + // the next HTTP request header. + httpTransformerReadHeader = 0 + // httpTransformerReadWriteBody HTTPTransformer is waiting to finish reading + // and writing the current HTTP request body. + httpTransformerReadWriteBody = 1 +) + +// HTTPTransformer wraps a net.Conn, intercepting Write calls and applying the +// specified protocol transform. 
+//
+// The HTTP request to be written (input to the Write) is converted to a
+// string, transformed, and converted back to binary and then actually written
+// to the underlying net.Conn.
+//
+// HTTPTransformer is not safe for concurrent use.
+type HTTPTransformer struct {
+	transform Spec
+	seed      *prng.Seed
+
+	// state is the HTTPTransformer state. Possible values are
+	// httpTransformerReadHeader and httpTransformerReadWriteBody.
+	state int64
+	// b is the accumulated bytes of the current HTTP request.
+	b []byte
+	// remain is the number of bytes of the current HTTP request, header and
+	// body, that have yet to be written to the underlying Conn.
+	remain uint64
+
+	net.Conn
+}
+
+// Warning: Does not handle chunked encoding and multiple HTTP
+// requests written in a single Write(). Must be called synchronously.
+func (t *HTTPTransformer) Write(b []byte) (int, error) {
+
+	if t.state == httpTransformerReadHeader {
+
+		t.b = append(t.b, b...)
+
+		// Wait until the entire HTTP request header has been read. Must check
+		// all accumulated bytes in case the "\r\n\r\n" separator is written
+		// over multiple Write() calls; from reading the net/http code, the
+		// entire HTTP request is written in a single Write() call.
+
+		sep := []byte("\r\n\r\n")
+
+		headerBodyLines := bytes.SplitN(t.b, sep, 2) // split header and body
+
+		if len(headerBodyLines) > 1 {
+
+			// read Content-Length before applying transform
+
+			var headerLines [][]byte
+
+			lines := bytes.Split(headerBodyLines[0], []byte("\r\n"))
+			if len(lines) > 1 {
+				// skip request line, e.g. "GET /foo HTTP/1.1"
+				headerLines = lines[1:]
+			}
+
+			var cl []byte
+			contentLengthHeader := []byte("Content-Length:")
+
+			for _, header := range headerLines {
+
+				if bytes.HasPrefix(header, contentLengthHeader) {
+
+					cl = textproto.TrimBytes(header[len(contentLengthHeader):])
+					break
+				}
+			}
+			if len(cl) == 0 {
+				// Either Content-Length header missing or Content-Length
+				// header value is empty, e.g. "Content-Length: ".
+				// b buffered in t.b
+				return len(b), errors.TraceNew("Content-Length missing")
+			}
+
+			n, err := strconv.ParseUint(string(cl), 10, 63)
+			if err != nil {
+				return 0, errors.Trace(err)
+			}
+
+			t.remain = n
+
+			// transform and write header
+
+			headerLen := len(headerBodyLines[0]) + len(sep)
+			header := t.b[:headerLen]
+
+			if t.transform != nil {
+				newHeaderS, err := t.transform.Apply(t.seed, string(header))
+				if err != nil {
+					return 0, errors.Trace(err)
+				}
+
+				newHeader := []byte(newHeaderS)
+
+				// only allocate new slice if header length changed
+				if len(newHeader) == len(header) {
+					copy(t.b[:len(header)], newHeader)
+				} else {
+					t.b = append(newHeader, t.b[len(header):]...)
+				}
+
+				header = newHeader
+			}
+
+			if math.MaxUint64-t.remain < uint64(len(header)) {
+				return 0, errors.TraceNew("t.remain + uint64(len(header)) overflows")
+			}
+			t.remain += uint64(len(header))
+
+			err = t.writeBuffer()
+
+			if t.remain > 0 {
+				// Remaining header or body bytes have yet to be written.
+				// Subsequent Write calls flush the buffer and relay the rest
+				// of the HTTP request body.
+				t.state = httpTransformerReadWriteBody
+			}
+
+			if err != nil {
+				// b buffered in t.b
+				return len(b), errors.Trace(err)
+			}
+		}
+
+		// b buffered in t.b
+		return len(b), nil
+	}
+
+	// HTTP request header has been transformed. Write any remaining bytes of
+	// HTTP request header and then write HTTP request body.
+
+	// Must write buffered bytes first, in-order, to write bytes to underlying
+	// Conn in the same order they were received in.
+ err := t.writeBuffer() + if err != nil { + // b not written or buffered + return 0, errors.Trace(err) + } + + n, err := t.Conn.Write(b) + + if uint64(n) > t.remain { + return 0, errors.TraceNew("t.remain - uint64(n) underflows") + } + + t.remain -= uint64(n) + + if t.remain <= 0 { + // Entire request, header and body, has been written. Return to + // waiting for next HTTP request header to arrive. + t.state = httpTransformerReadHeader + t.remain = 0 + } + + return n, errors.Trace(err) +} + +func (t *HTTPTransformer) writeBuffer() error { + for len(t.b) > 0 { + n, err := t.Conn.Write(t.b) + + if uint64(n) > t.remain { + return errors.TraceNew("t.remain - uint64(n) underflows") + } + + t.remain -= uint64(n) + + if n == len(t.b) { + t.b = nil + } else { + t.b = t.b[n:] + } + + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +func WrapDialerWithHTTPTransformer(dialer common.Dialer, params *HTTPTransformerParameters) common.Dialer { + return func(ctx context.Context, network, addr string) (net.Conn, error) { + conn, err := dialer(ctx, network, addr) + if err != nil { + return nil, errors.Trace(err) + } + return &HTTPTransformer{ + Conn: conn, + transform: params.ProtocolTransformSpec, + seed: params.ProtocolTransformSeed, + }, nil + } +} diff --git a/psiphon/common/transforms/httpTransformer_test.go b/psiphon/common/transforms/httpTransformer_test.go new file mode 100644 index 000000000..5005f41e3 --- /dev/null +++ b/psiphon/common/transforms/httpTransformer_test.go @@ -0,0 +1,391 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package transforms + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "strings" + "testing" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" +) + +func TestHTTPTransformerHTTPRequest(t *testing.T) { + + type test struct { + name string + input string + wantOutput string + wantError error + chunkSize int + transform Spec + connWriteLimit int + connWriteErrs []error + } + + tests := []test{ + { + name: "no transform", + input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + chunkSize: 1, + }, + { + name: "no transform with partial write and errors", + input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + chunkSize: 1, + connWriteLimit: 1, + connWriteErrs: []error{errors.New("err1"), errors.New("err2")}, + }, + { + name: "transform not applied to body", + input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + chunkSize: 1, + transform: Spec{[2]string{"abcd", "efgh"}}, + }, + { + name: "Content-Length missing", + input: "HTTP 1.1\r\n\r\nabcd", + wantError: errors.New("Content-Length missing"), + chunkSize: 1, + }, + { + name: "Content-Length overflow", + input: fmt.Sprintf("HTTP 1.1\r\nContent-Length: %d\r\n\r\nabcd", uint64(math.MaxUint64)), + wantError: errors.New("strconv.ParseUint: parsing \"18446744073709551615\": value out of range"), + chunkSize: 1, + }, + { + name: "no transform", + input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + chunkSize: 1, + }, + { + name: "incorrect Content-Length header value", + input: "HTTP 1.1\r\nContent-Length: 3\r\n\r\nabcd", + wantOutput: "HTTP 1.1\r\nContent-Length: 3\r\n\r\nabc", + chunkSize: 1, + }, + { + name: "single HTTP request written in a single write", + input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + chunkSize: 999, + }, + { + name: "transform", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\nabcd", + chunkSize: 1, + transform: Spec{[2]string{"4", "100"}}, + }, + { + name: "transform with partial write and errors in header write", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\nabcd", + chunkSize: 1, + transform: Spec{[2]string{"4", "100"}}, + connWriteLimit: 1, + connWriteErrs: []error{errors.New("err1"), errors.New("err2")}, + }, + { + name: "transform with chunk write and errors in body write", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\nabcd", + chunkSize: 39, + transform: Spec{[2]string{"4", "100"}}, + connWriteLimit: 1, + connWriteErrs: []error{errors.New("err1"), errors.New("err2"), errors.New("err3")}, + }, + // Multiple HTTP requests written in a single write not supported so an + // error is expected. 
+ { + name: "multiple HTTP requests written in a single write", + input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 2\r\n\r\n12", + wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 2\r\n\r\n12", + chunkSize: 999, + wantError: errors.New("t.remain - uint64(n) underflows"), + }, + // Multiple HTTP requests written in a single write not supported so an + // error is expected because a write will occur where it contains both + // the end of the previous HTTP request and the start of a new one. + { + name: "multiple HTTP requests written in chunks", + input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 2\r\n\r\n12", + wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 2\r\n\r\n12", + chunkSize: 3, + wantError: errors.New("t.remain - uint64(n) underflows"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + seed, err := prng.NewSeed() + if err != nil { + t.Fatalf("prng.NewSeed failed %v", err) + } + + conn := testConn{ + writeLimit: tt.connWriteLimit, + writeErrs: tt.connWriteErrs, + } + + transformer := &HTTPTransformer{ + transform: tt.transform, + seed: seed, + Conn: &conn, + } + + remain := []byte(tt.input) + + // Write input bytes to transformer in chunks and then check + // output. + for { + if len(remain) == 0 { + break + } + + var b []byte + if len(remain) < tt.chunkSize { + b = remain + } else { + b = remain[:tt.chunkSize] + } + + expectedErr := len(conn.writeErrs) > 0 + + var n int + n, err = transformer.Write(b) + if err != nil { + if expectedErr { + // reset err + err = nil + } else { + // err checked outside loop + break + } + } + + remain = remain[n:] + } + if tt.wantError == nil { + if err != nil { + t.Fatalf("unexpected error %v", err) + } + } else { + // tt.wantError != nil + if err == nil { + t.Fatalf("expected error %v", tt.wantError) + } else if !strings.Contains(err.Error(), tt.wantError.Error()) { + t.Fatalf("expected error %v got %v", tt.wantError, err) + } + } + if tt.wantError == nil && string(conn.b) != tt.wantOutput { + t.Fatalf("expected \"%s\" of len %d but got \"%s\" of len %d", escapeNewlines(tt.wantOutput), len(tt.wantOutput), escapeNewlines(string(conn.b)), len(conn.b)) + } + }) + } +} + +func TestHTTPTransformerHTTPServer(t *testing.T) { + + type test struct { + name string + request func(string) *http.Request + wantBody string + transform Spec + } + + tests := []test{ + { + name: "request body truncated", + transform: Spec{[2]string{"Content-Length: 4", "Content-Length: 3"}}, + request: func(addr string) *http.Request { + + body := bytes.NewReader([]byte("abcd")) + + req, err := http.NewRequest("POST", "http://"+addr, body) + if err != nil { + panic(err) + } + + return req + }, + wantBody: "abc", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + seed, err := prng.NewSeed() + if err != nil { + t.Fatalf("prng.NewSeed failed %v", err) + } + + params := &HTTPTransformerParameters{ + ProtocolTransformName: "spec", + ProtocolTransformSpec: tt.transform, + ProtocolTransformSeed: seed, + } + + dialer := func(ctx context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + } + + httpTransport := &http.Transport{ + DialContext: WrapDialerWithHTTPTransformer(dialer, params), + } + + type serverRequest struct { + req *http.Request + body []byte + } + + serverReq := make(chan *serverRequest, 1) + + http.HandleFunc("/", func(w http.ResponseWriter, r 
*http.Request) { + b, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + } + go func() { + serverReq <- &serverRequest{ + req: r, + body: b, + } + close(serverReq) + }() + }) + + s := &http.Server{ + Addr: "127.0.0.1:8080", + } + + go func() { + s.ListenAndServe() + }() + + client := http.Client{ + Transport: httpTransport, + Timeout: 2 * time.Second, + } + + req := tt.request(s.Addr) + + resp, err := client.Do(req) + + // first shutdown server, then check err + shutdownErr := s.Shutdown(context.Background()) + if shutdownErr != nil { + t.Fatalf("s.Shutdown failed %v", shutdownErr) + } + + if err != nil { + t.Fatalf("client.Do failed %v", err) + } + + if resp.StatusCode != http.StatusOK { + t.Fatalf("expected 200 but got %d", resp.StatusCode) + } + + r := <-serverReq + + if tt.wantBody != string(r.body) { + t.Fatalf("expected body %s but got %s", tt.wantBody, string(r.body)) + } + }) + } +} + +func escapeNewlines(s string) string { + s = strings.ReplaceAll(s, "\n", "\\n") + s = strings.ReplaceAll(s, "\r", "\\r") + return s +} + +type testConn struct { + // b is the accumulated bytes from Write() calls. + b []byte + // writeLimit is the max number of bytes that will be written in a Write() + // call. + writeLimit int + // writeErrs are returned from Write() calls in order. If empty, then a nil + // error is returned. + writeErrs []error +} + +func (c *testConn) Read(b []byte) (n int, err error) { + return 0, nil +} + +func (c *testConn) Write(b []byte) (n int, err error) { + + if len(c.writeErrs) > 0 { + err = c.writeErrs[0] + c.writeErrs = c.writeErrs[1:] + } + + if c.writeLimit != 0 && c.writeLimit < len(b) { + c.b = append(c.b, b[:c.writeLimit]...) + n = c.writeLimit + return + } + + c.b = append(c.b, b...) + n = len(b) + return +} + +func (c *testConn) Close() error { + return nil +} + +func (c *testConn) LocalAddr() net.Addr { + return nil +} + +func (c *testConn) RemoteAddr() net.Addr { + return nil +} + +func (c *testConn) SetDeadline(t time.Time) error { + return nil +} + +func (c *testConn) SetReadDeadline(t time.Time) error { + return nil +} + +func (c *testConn) SetWriteDeadline(t time.Time) error { + return nil +} diff --git a/psiphon/common/transforms/transforms.go b/psiphon/common/transforms/transforms.go index 2346193b4..0cc3249e3 100644 --- a/psiphon/common/transforms/transforms.go +++ b/psiphon/common/transforms/transforms.go @@ -45,7 +45,7 @@ const ( // data may be retained in the transformed data. // // For example, with the transform [2]string{"([a-b])", "\\$\\ -// {1\\}"c}, substrings consisting of the characters 'a' and 'b' will be +// {1\\}c"}, substrings consisting of the characters 'a' and 'b' will be // transformed into the same substring with a single character 'c' appended. 
type Spec [][2]string diff --git a/psiphon/config.go b/psiphon/config.go index 5242e374f..275e53775 100755 --- a/psiphon/config.go +++ b/psiphon/config.go @@ -813,6 +813,13 @@ type Config struct { DNSResolverCacheExtensionInitialTTLMilliseconds *int DNSResolverCacheExtensionVerifiedTTLMilliseconds *int + DirectHTTPProtocolTransformSpecs transforms.Specs + DirectHTTPProtocolTransformScopedSpecNames transforms.ScopedSpecNames + DirectHTTPProtocolTransformProbability *float64 + FrontedHTTPProtocolTransformSpecs transforms.Specs + FrontedHTTPProtocolTransformScopedSpecNames transforms.ScopedSpecNames + FrontedHTTPProtocolTransformProbability *float64 + // params is the active parameters.Parameters with defaults, config values, // and, optionally, tactics applied. // @@ -1895,6 +1902,30 @@ func (config *Config) makeConfigParameters() map[string]interface{} { applyParameters[parameters.DNSResolverCacheExtensionVerifiedTTL] = fmt.Sprintf("%dms", *config.DNSResolverCacheExtensionVerifiedTTLMilliseconds) } + if config.DirectHTTPProtocolTransformSpecs != nil { + applyParameters[parameters.DirectHTTPProtocolTransformSpecs] = config.DirectHTTPProtocolTransformSpecs + } + + if config.DirectHTTPProtocolTransformScopedSpecNames != nil { + applyParameters[parameters.DirectHTTPProtocolTransformScopedSpecNames] = config.DirectHTTPProtocolTransformScopedSpecNames + } + + if config.DirectHTTPProtocolTransformProbability != nil { + applyParameters[parameters.DirectHTTPProtocolTransformProbability] = *config.DirectHTTPProtocolTransformProbability + } + + if config.FrontedHTTPProtocolTransformSpecs != nil { + applyParameters[parameters.FrontedHTTPProtocolTransformSpecs] = config.FrontedHTTPProtocolTransformSpecs + } + + if config.FrontedHTTPProtocolTransformScopedSpecNames != nil { + applyParameters[parameters.FrontedHTTPProtocolTransformScopedSpecNames] = config.FrontedHTTPProtocolTransformScopedSpecNames + } + + if config.FrontedHTTPProtocolTransformProbability != nil { + applyParameters[parameters.FrontedHTTPProtocolTransformProbability] = *config.FrontedHTTPProtocolTransformProbability + } + // When adding new config dial parameters that may override tactics, also // update setDialParametersHash. 
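// Illustrative sketch, not taken from this patch: one way the new HTTP
// transform config fields might be populated. The spec name "example-spec" is
// hypothetical; the regex/replacement pair is borrowed from the httpTransformer
// unit tests in this patch. Specs maps a spec name to a list of
// [regex, replacement] pairs, and ScopedSpecNames maps a scope (a fronting
// provider ID, or "" for the default scope) to the spec names eligible in that
// scope.
//
//	probability := 1.0
//	config.DirectHTTPProtocolTransformProbability = &probability
//	config.DirectHTTPProtocolTransformSpecs = transforms.Specs{
//		"example-spec": {{"Content-Length: 4", "Content-Length: 3"}},
//	}
//	config.DirectHTTPProtocolTransformScopedSpecNames = transforms.ScopedSpecNames{
//		"": {"example-spec"},
//	}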
@@ -2303,6 +2334,44 @@ func (config *Config) setDialParametersHash() { binary.Write(hash, binary.LittleEndian, int64(*config.DNSResolverCacheExtensionVerifiedTTLMilliseconds)) } + if config.DirectHTTPProtocolTransformSpecs != nil { + hash.Write([]byte("DirectHTTPProtocolTransformSpecs")) + encodedDirectHTTPProtocolTransformSpecs, _ := + json.Marshal(config.DirectHTTPProtocolTransformSpecs) + hash.Write(encodedDirectHTTPProtocolTransformSpecs) + } + + if config.DirectHTTPProtocolTransformScopedSpecNames != nil { + hash.Write([]byte("")) + encodedDirectHTTPProtocolTransformScopedSpecNames, _ := + json.Marshal(config.DirectHTTPProtocolTransformScopedSpecNames) + hash.Write(encodedDirectHTTPProtocolTransformScopedSpecNames) + } + + if config.DirectHTTPProtocolTransformProbability != nil { + hash.Write([]byte("DirectHTTPProtocolTransformProbability")) + binary.Write(hash, binary.LittleEndian, *config.DirectHTTPProtocolTransformProbability) + } + + if config.FrontedHTTPProtocolTransformSpecs != nil { + hash.Write([]byte("FrontedHTTPProtocolTransformSpecs")) + encodedFrontedHTTPProtocolTransformSpecs, _ := + json.Marshal(config.FrontedHTTPProtocolTransformSpecs) + hash.Write(encodedFrontedHTTPProtocolTransformSpecs) + } + + if config.FrontedHTTPProtocolTransformScopedSpecNames != nil { + hash.Write([]byte("")) + encodedFrontedHTTPProtocolTransformScopedSpecNames, _ := + json.Marshal(config.FrontedHTTPProtocolTransformScopedSpecNames) + hash.Write(encodedFrontedHTTPProtocolTransformScopedSpecNames) + } + + if config.FrontedHTTPProtocolTransformProbability != nil { + hash.Write([]byte("FrontedHTTPProtocolTransformProbability")) + binary.Write(hash, binary.LittleEndian, *config.FrontedHTTPProtocolTransformProbability) + } + config.dialParametersHash = hash.Sum(nil) } diff --git a/psiphon/dialParameters.go b/psiphon/dialParameters.go index 1a013c839..08007a107 100644 --- a/psiphon/dialParameters.go +++ b/psiphon/dialParameters.go @@ -38,6 +38,7 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/resolver" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/transforms" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/values" utls "github.com/refraction-networking/utls" regen "github.com/zach-klippenstein/goregen" @@ -143,6 +144,8 @@ type DialParameters struct { resolver *resolver.Resolver `json:"-"` ResolveParameters *resolver.ResolveParameters + HTTPTransformerParameters *transforms.HTTPTransformerParameters + dialConfig *DialConfig `json:"-"` meekConfig *MeekConfig `json:"-"` } @@ -196,6 +199,7 @@ func MakeDialParameters( replayAPIRequestPadding := p.Bool(parameters.ReplayAPIRequestPadding) replayHoldOffTunnel := p.Bool(parameters.ReplayHoldOffTunnel) replayResolveParameters := p.Bool(parameters.ReplayResolveParameters) + replayHTTPTransformerParameters := p.Bool(parameters.ReplayHTTPTransformerParameters) // Check for existing dial parameters for this server/network ID. 
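// A minimal sketch, for context, of how the HTTPTransformerParameters
// generated below can be consumed: a dialer is wrapped with
// transforms.WrapDialerWithHTTPTransformer, as the unit tests in this patch
// do, so each conn it returns applies the transform to the outbound HTTP
// request header before relaying bytes to the wrapped conn. This assumes
// dialParams.HTTPTransformerParameters is non-nil; the MeekConfig wiring
// appears in the hunks that follow.
//
//	baseDialer := func(ctx context.Context, network, addr string) (net.Conn, error) {
//		return net.Dial(network, addr)
//	}
//	// Each conn returned by the wrapped dialer transforms the HTTP request
//	// header written through it.
//	dialer := transforms.WrapDialerWithHTTPTransformer(
//		baseDialer, dialParams.HTTPTransformerParameters)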
@@ -763,6 +767,22 @@ func MakeDialParameters( } + if (!isReplay || !replayHTTPTransformerParameters) && protocol.TunnelProtocolUsesMeekHTTP(dialParams.TunnelProtocol) { + + isFronted := protocol.TunnelProtocolUsesFrontedMeek(dialParams.TunnelProtocol) + + params, err := makeHTTPTransformerParameters(config.GetParameters().Get(), serverEntry.FrontingProviderID, isFronted) + if err != nil { + return nil, errors.Trace(err) + } + + if params.ProtocolTransformSpec != nil { + dialParams.HTTPTransformerParameters = params + } else { + dialParams.HTTPTransformerParameters = nil + } + } + // Set dial address fields. This portion of configuration is // deterministic, given the parameters established or replayed so far. @@ -958,6 +978,7 @@ func MakeDialParameters( MeekObfuscatedKey: serverEntry.MeekObfuscatedKey, MeekObfuscatorPaddingSeed: dialParams.MeekObfuscatorPaddingSeed, NetworkLatencyMultiplier: dialParams.NetworkLatencyMultiplier, + HTTPTransformerParameters: dialParams.HTTPTransformerParameters, } // Use an asynchronous callback to record the resolved IP address when @@ -1066,11 +1087,11 @@ func (dialParams *DialParameters) GetTLSVersionForMetrics() string { // There are two concerns regarding which dial parameter fields are safe to // exchange: // -// - Unlike signed server entries, there's no independent trust anchor -// that can certify that the exchange data is valid. +// - Unlike signed server entries, there's no independent trust anchor +// that can certify that the exchange data is valid. // -// - While users should only perform the exchange with trusted peers, -// the user's trust in their peer may be misplaced. +// - While users should only perform the exchange with trusted peers, +// the user's trust in their peer may be misplaced. // // This presents the possibility of attack such as the peer sending dial // parameters that could be used to trace/monitor/flag the importer; or @@ -1355,3 +1376,60 @@ func selectHostName( return hostName } + +// makeHTTPTransformerParameters generates HTTPTransformerParameters using the +// input tactics parameters and optional frontingProviderID context. +func makeHTTPTransformerParameters(p parameters.ParametersAccessor, + frontingProviderID string, isFronted bool) (*transforms.HTTPTransformerParameters, error) { + + params := transforms.HTTPTransformerParameters{} + + // Select an HTTP transform. If the request is fronted, HTTP request + // transforms are "scoped" by fronting provider ID. Otherwise, a transform + // from the default scope (transforms.SCOPE_ANY == "") is selected. 
+ + var specsKey string + var scopedSpecsNamesKey string + + useTransform := false + scope := transforms.SCOPE_ANY + + if isFronted { + if p.WeightedCoinFlip(parameters.FrontedHTTPProtocolTransformProbability) { + useTransform = true + scope = frontingProviderID + specsKey = parameters.FrontedHTTPProtocolTransformSpecs + scopedSpecsNamesKey = parameters.FrontedHTTPProtocolTransformScopedSpecNames + } + } else { + // unfronted + if p.WeightedCoinFlip(parameters.DirectHTTPProtocolTransformProbability) { + useTransform = true + specsKey = parameters.DirectHTTPProtocolTransformSpecs + scopedSpecsNamesKey = parameters.DirectHTTPProtocolTransformScopedSpecNames + } + } + + if useTransform { + + specs := p.ProtocolTransformSpecs( + specsKey) + scopedSpecNames := p.ProtocolTransformScopedSpecNames( + scopedSpecsNamesKey) + + name, spec := specs.Select(scope, scopedSpecNames) + + if spec != nil { + params.ProtocolTransformName = name + params.ProtocolTransformSpec = spec + var err error + // transform seed generated + params.ProtocolTransformSeed, err = prng.NewSeed() + if err != nil { + return nil, errors.Trace(err) + } + } + } + + return ¶ms, nil +} diff --git a/psiphon/dialParameters_test.go b/psiphon/dialParameters_test.go index de14ecbdc..79f9f7ae4 100644 --- a/psiphon/dialParameters_test.go +++ b/psiphon/dialParameters_test.go @@ -33,6 +33,7 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/transforms" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/values" ) @@ -89,6 +90,9 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { applyParameters[parameters.HoldOffTunnelFrontingProviderIDs] = []string{frontingProviderID} applyParameters[parameters.HoldOffTunnelProbability] = 1.0 applyParameters[parameters.DNSResolverAlternateServers] = []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"} + applyParameters[parameters.DirectHTTPProtocolTransformProbability] = 1.0 + applyParameters[parameters.DirectHTTPProtocolTransformSpecs] = transforms.Specs{"spec": transforms.Spec{{"", ""}}} + applyParameters[parameters.DirectHTTPProtocolTransformScopedSpecNames] = transforms.ScopedSpecNames{"": {"spec"}} err = clientConfig.SetParameters("tag1", false, applyParameters) if err != nil { t.Fatalf("SetParameters failed: %s", err) @@ -356,6 +360,12 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { t.Fatalf("mismatching ResolveParameters fields") } + if (replayDialParams.HTTPTransformerParameters == nil) != (dialParams.HTTPTransformerParameters == nil) || + (replayDialParams.HTTPTransformerParameters != nil && + !reflect.DeepEqual(replayDialParams.HTTPTransformerParameters, dialParams.HTTPTransformerParameters)) { + t.Fatalf("mismatching HTTPTransformerParameters fields") + } + // Test: no replay after change tactics applyParameters[parameters.ReplayDialParametersTTL] = "1s" @@ -702,3 +712,97 @@ func makeMockServerEntries( return serverEntries } + +func TestMakeHTTPTransformerParameters(t *testing.T) { + + type test struct { + name string + frontingProviderID string + isFronted bool + paramValues map[string]interface{} + expectedTransformName string + expectedTransformSpec transforms.Spec + } + + tests := []test{ + { + name: "unfronted", + frontingProviderID: "", + isFronted: false, + paramValues: map[string]interface{}{ + 
"DirectHTTPProtocolTransformProbability": 1, + "DirectHTTPProtocolTransformSpecs": transforms.Specs{ + "spec1": {{"A", "B"}}, + }, + "DirectHTTPProtocolTransformScopedSpecNames": transforms.ScopedSpecNames{ + "": {"spec1"}, + }, + }, + expectedTransformName: "spec1", + expectedTransformSpec: [][2]string{{"A", "B"}}, + }, + { + name: "fronted", + frontingProviderID: "frontingProvider", + isFronted: true, + paramValues: map[string]interface{}{ + "FrontedHTTPProtocolTransformProbability": 1, + "FrontedHTTPProtocolTransformSpecs": transforms.Specs{ + "spec1": {{"A", "B"}}, + }, + "FrontedHTTPProtocolTransformScopedSpecNames": transforms.ScopedSpecNames{ + "frontingProvider": {"spec1"}, + }, + }, + expectedTransformName: "spec1", + expectedTransformSpec: [][2]string{{"A", "B"}}, + }, + { + name: "no transform, coinflip false", + frontingProviderID: "frontingProvider", + isFronted: false, + paramValues: map[string]interface{}{ + "DirectHTTPProtocolTransformProbability": 0, + "DirectHTTPProtocolTransformSpecs": transforms.Specs{ + "spec1": {{"A", "B"}}, + }, + "DirectHTTPProtocolTransformScopedSpecNames": transforms.ScopedSpecNames{ + "frontingProvider": {"spec1"}, + }, + }, + expectedTransformName: "", + expectedTransformSpec: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + params, err := parameters.NewParameters(nil) + if err != nil { + t.Fatalf("parameters.NewParameters failed %v", err) + } + + _, err = params.Set("", false, tt.paramValues) + if err != nil { + t.Fatalf("params.Set failed %v", err) + } + + httpTransformerParams, err := makeHTTPTransformerParameters(params.Get(), tt.frontingProviderID, tt.isFronted) + if err != nil { + t.Fatalf("MakeHTTPTransformerParameters failed %v", err) + } + if httpTransformerParams.ProtocolTransformName != tt.expectedTransformName { + t.Fatalf("expected ProtocolTransformName \"%s\" but got \"%s\"", tt.expectedTransformName, httpTransformerParams.ProtocolTransformName) + } + if !reflect.DeepEqual(httpTransformerParams.ProtocolTransformSpec, tt.expectedTransformSpec) { + t.Fatalf("expected ProtocolTransformSpec %v but got %v", tt.expectedTransformSpec, httpTransformerParams.ProtocolTransformSpec) + } + if httpTransformerParams.ProtocolTransformSpec != nil { + if httpTransformerParams.ProtocolTransformSeed == nil { + t.Fatalf("expected non-nil seed") + } + } + }) + } +} diff --git a/psiphon/meekConn.go b/psiphon/meekConn.go index ea0e694db..35ffda4f4 100644 --- a/psiphon/meekConn.go +++ b/psiphon/meekConn.go @@ -44,6 +44,7 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/quic" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/transforms" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/values" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/upstreamproxy" "golang.org/x/crypto/nacl/box" @@ -188,6 +189,10 @@ type MeekConfig struct { MeekCookieEncryptionPublicKey string MeekObfuscatedKey string MeekObfuscatorPaddingSeed *prng.Seed + + // HTTPTransformerParameters specifies an HTTP transformer to apply to the + // meek connection if it uses HTTP. 
+ HTTPTransformerParameters *transforms.HTTPTransformerParameters } // MeekConn is a network connection that tunnels net.Conn flows over HTTP and supports @@ -585,6 +590,15 @@ func DialMeek( } } + if protocol.TunnelProtocolUsesMeekHTTP(meekConfig.ClientTunnelProtocol) { + // Only apply transformer if it will perform a transform; otherwise + // applying a no-op transform will incur an unnecessary performance + // cost. + if meekConfig.HTTPTransformerParameters != nil && meekConfig.HTTPTransformerParameters.ProtocolTransformSpec != nil { + dialer = transforms.WrapDialerWithHTTPTransformer(dialer, meekConfig.HTTPTransformerParameters) + } + } + httpTransport := &http.Transport{ Proxy: proxyUrl, DialContext: dialer, diff --git a/psiphon/notice.go b/psiphon/notice.go index 7d95405e5..c293717e7 100644 --- a/psiphon/notice.go +++ b/psiphon/notice.go @@ -579,6 +579,12 @@ func noticeWithDialParameters(noticeType string, dialParams *DialParameters, pos } } + if dialParams.HTTPTransformerParameters != nil { + if dialParams.HTTPTransformerParameters.ProtocolTransformSpec != nil { + args = append(args, "HTTPTransform", dialParams.HTTPTransformerParameters.ProtocolTransformName) + } + } + if dialParams.DialConnMetrics != nil { metrics := dialParams.DialConnMetrics.GetMetrics() for name, value := range metrics { diff --git a/psiphon/server/api.go b/psiphon/server/api.go index 48b9369e5..457ade4f2 100644 --- a/psiphon/server/api.go +++ b/psiphon/server/api.go @@ -925,6 +925,7 @@ var baseDialParams = []requestParamSpec{ {"dns_preferred", isAnyString, requestParamOptional}, {"dns_transform", isAnyString, requestParamOptional}, {"dns_attempt", isIntString, requestParamOptional | requestParamLogStringAsInt}, + {"http_transform", isAnyString, requestParamOptional}, } // baseSessionAndDialParams adds baseDialParams to baseSessionParams. 
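Tying the client pieces together: for plain-HTTP meek protocols, DialMeek wraps its dialer with the HTTP transformer only when a transform spec was actually selected, and the selected transform name is then reported as the http_transform dial parameter. A rough, self-contained sketch of that wiring, using hypothetical spec values and assuming common.Dialer has the (ctx, network, address) signature used by the test dialer later in this patch series:

package main

import (
	"context"
	"net"
	"net/http"

	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng"
	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/transforms"
)

func main() {
	seed, err := prng.NewSeed()
	if err != nil {
		panic(err)
	}

	// Hypothetical transform parameters: rewrite "4" to "100" in the
	// outgoing HTTP request header, as in the package tests.
	params := &transforms.HTTPTransformerParameters{
		ProtocolTransformName: "spec1",
		ProtocolTransformSpec: transforms.Spec{{"4", "100"}},
		ProtocolTransformSeed: seed,
	}

	// Base dialer; common.Dialer is assumed to have the usual
	// (ctx, network, address) signature.
	var dialer common.Dialer = func(
		ctx context.Context, network, address string) (net.Conn, error) {
		d := &net.Dialer{}
		return d.DialContext(ctx, network, address)
	}

	// Wrap only when there is an actual transform to apply, mirroring
	// the check added to DialMeek.
	if params.ProtocolTransformSpec != nil {
		dialer = transforms.WrapDialerWithHTTPTransformer(dialer, params)
	}

	transport := &http.Transport{DialContext: dialer}
	_ = &http.Client{Transport: transport}
}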
diff --git a/psiphon/serverApi.go b/psiphon/serverApi.go index ca021e020..1bbc65268 100644 --- a/psiphon/serverApi.go +++ b/psiphon/serverApi.go @@ -1104,6 +1104,12 @@ func getBaseAPIParameters( params["dns_transform"] = dialParams.ResolveParameters.ProtocolTransformName } + if dialParams.HTTPTransformerParameters != nil { + if dialParams.HTTPTransformerParameters.ProtocolTransformSpec != nil { + params["http_transform"] = dialParams.HTTPTransformerParameters.ProtocolTransformName + } + } + params["dns_attempt"] = strconv.Itoa( dialParams.ResolveParameters.GetFirstAttemptWithAnswer()) } From 64ba57ffa7ab0eed8525f06ecc2fd6bb5de47fb9 Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Wed, 22 Feb 2023 15:42:09 -0500 Subject: [PATCH 21/44] Fix: return number of byte buffered --- psiphon/common/transforms/httpTransformer.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/psiphon/common/transforms/httpTransformer.go b/psiphon/common/transforms/httpTransformer.go index eb681acb4..a02cdb613 100644 --- a/psiphon/common/transforms/httpTransformer.go +++ b/psiphon/common/transforms/httpTransformer.go @@ -131,7 +131,8 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { n, err := strconv.ParseUint(string(cl), 10, 63) if err != nil { - return 0, errors.Trace(err) + // b buffered in t.b + return len(b), errors.Trace(err) } t.remain = n @@ -144,7 +145,8 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { if t.transform != nil { newHeaderS, err := t.transform.Apply(t.seed, string(header)) if err != nil { - return 0, errors.Trace(err) + // b buffered in t.b + return len(b), errors.Trace(err) } newHeader := []byte(newHeaderS) @@ -160,7 +162,8 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { } if math.MaxUint64-t.remain < uint64(len(header)) { - return 0, errors.TraceNew("t.remain + uint64(len(header)) overflows") + // b buffered in t.b + return len(b), errors.TraceNew("t.remain + uint64(len(header)) overflows") } t.remain += uint64(len(header)) From bf0ca25466324cc4c0d31f50c20550706f327029 Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Wed, 22 Feb 2023 16:02:56 -0500 Subject: [PATCH 22/44] Return unmodified error from Conn.Write() --- psiphon/common/transforms/httpTransformer.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/psiphon/common/transforms/httpTransformer.go b/psiphon/common/transforms/httpTransformer.go index a02cdb613..09a6541a5 100644 --- a/psiphon/common/transforms/httpTransformer.go +++ b/psiphon/common/transforms/httpTransformer.go @@ -199,7 +199,11 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { n, err := t.Conn.Write(b) if uint64(n) > t.remain { - return 0, errors.TraceNew("t.remain - uint64(n) underflows") + // Attempt to recover by resetting t.remain. If b contains bytes of + // subsequent request(s) (should never happen), then these request(s) + // may be mangled. + t.remain = 0 + return n, errors.TraceNew("t.remain - uint64(n) underflows") } t.remain -= uint64(n) @@ -211,7 +215,7 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { t.remain = 0 } - return n, errors.Trace(err) + return n, err } func (t *HTTPTransformer) writeBuffer() error { @@ -219,6 +223,10 @@ func (t *HTTPTransformer) writeBuffer() error { n, err := t.Conn.Write(t.b) if uint64(n) > t.remain { + // Attempt to recover by resetting t.remain. If t.b contains bytes + // of subsequent request(s) (should never happen), then these + // request(s) may be mangled. 
+ t.remain = 0 return errors.TraceNew("t.remain - uint64(n) underflows") } @@ -231,7 +239,7 @@ func (t *HTTPTransformer) writeBuffer() error { } if err != nil { - return errors.Trace(err) + return err } } return nil From b8e2e481f51d863630a48c2ef58b5284fa5431f7 Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Wed, 22 Feb 2023 17:04:25 -0500 Subject: [PATCH 23/44] Handle multiple requests in single Write() --- psiphon/common/transforms/httpTransformer.go | 58 ++++++++++++------- .../common/transforms/httpTransformer_test.go | 29 +++++++--- 2 files changed, 59 insertions(+), 28 deletions(-) diff --git a/psiphon/common/transforms/httpTransformer.go b/psiphon/common/transforms/httpTransformer.go index 09a6541a5..8fb6cdbe9 100644 --- a/psiphon/common/transforms/httpTransformer.go +++ b/psiphon/common/transforms/httpTransformer.go @@ -82,8 +82,7 @@ type HTTPTransformer struct { net.Conn } -// Warning: Does not handle chunked encoding and multiple HTTP -// requests written in a single Write(). Must be called synchronously. +// Warning: Does not handle chunked encoding. Must be called synchronously. func (t *HTTPTransformer) Write(b []byte) (int, error) { if t.state == httpTransformerReadHeader { @@ -170,14 +169,29 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { err = t.writeBuffer() if t.remain > 0 { + t.state = httpTransformerReadWriteBody + } else { // Entire request, header and body, has been written. Return to // waiting for next HTTP request header to arrive. - t.state = httpTransformerReadWriteBody + if len(t.b) > 0 { + // Return the number of bytes written to the underlying + // Conn and clear t.b instead of calling t.Write() with any + // remaining bytes of t.b. The caller must call Write() + // again with the unwritten, and unbuffered, bytes of b. + // Since t.remain = 0 it is guaranteed that + // len(b) - len(t.b) >= 0 because len(t.b) is the number of + // subsequent request bytes and len(b) is the number of + // trailing bytes of the current request plus the + // subsequent request bytes. + written := len(b) - len(t.b) + t.b = nil + return written, err + } } if err != nil { // b buffered in t.b - return len(b), errors.Trace(err) + return len(b), err } } @@ -196,21 +210,24 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { return 0, errors.Trace(err) } - n, err := t.Conn.Write(b) - - if uint64(n) > t.remain { - // Attempt to recover by resetting t.remain. If b contains bytes of - // subsequent request(s) (should never happen), then these request(s) - // may be mangled. - t.remain = 0 - return n, errors.TraceNew("t.remain - uint64(n) underflows") + bytesToWrite := uint64(len(b)) + if bytesToWrite > t.remain { + bytesToWrite = t.remain } + n, err := t.Conn.Write(b[:bytesToWrite]) + + // Do not need to check for underflow because n <= t.remain t.remain -= uint64(n) if t.remain <= 0 { // Entire request, header and body, has been written. Return to // waiting for next HTTP request header to arrive. + // + // Return the number of bytes written to the underlying Conn instead of + // calling t.Write() with any remaining bytes of b which were not + // written or buffered, i.e. when n < len(b). The caller must call + // Write() again with the unwritten, and unbuffered, bytes of b. 
t.state = httpTransformerReadHeader t.remain = 0 } @@ -219,17 +236,16 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { } func (t *HTTPTransformer) writeBuffer() error { - for len(t.b) > 0 { - n, err := t.Conn.Write(t.b) - - if uint64(n) > t.remain { - // Attempt to recover by resetting t.remain. If t.b contains bytes - // of subsequent request(s) (should never happen), then these - // request(s) may be mangled. - t.remain = 0 - return errors.TraceNew("t.remain - uint64(n) underflows") + for len(t.b) > 0 && t.remain > 0 { + + bytesToWrite := uint64(len(t.b)) + if bytesToWrite > t.remain { + bytesToWrite = t.remain } + n, err := t.Conn.Write(t.b[:bytesToWrite]) + + // Do not need to check for underflow because n <= t.remain t.remain -= uint64(n) if n == len(t.b) { diff --git a/psiphon/common/transforms/httpTransformer_test.go b/psiphon/common/transforms/httpTransformer_test.go index 5005f41e3..8c9293377 100644 --- a/psiphon/common/transforms/httpTransformer_test.go +++ b/psiphon/common/transforms/httpTransformer_test.go @@ -125,24 +125,39 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { connWriteLimit: 1, connWriteErrs: []error{errors.New("err1"), errors.New("err2"), errors.New("err3")}, }, - // Multiple HTTP requests written in a single write not supported so an - // error is expected. + // Multiple HTTP requests written in a single write. { name: "multiple HTTP requests written in a single write", input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 2\r\n\r\n12", wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 2\r\n\r\n12", chunkSize: 999, - wantError: errors.New("t.remain - uint64(n) underflows"), }, - // Multiple HTTP requests written in a single write not supported so an - // error is expected because a write will occur where it contains both - // the end of the previous HTTP request and the start of a new one. + // Multiple HTTP requests written in a single write. A write will occur + // where it contains both the end of the previous HTTP request and the + // start of a new one. { name: "multiple HTTP requests written in chunks", input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 2\r\n\r\n12", wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 2\r\n\r\n12", chunkSize: 3, - wantError: errors.New("t.remain - uint64(n) underflows"), + }, + // Multiple HTTP requests written in a single write with transform. + { + name: "multiple HTTP requests written in a single write", + input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 4\r\n\r\n12HTTP 1.1\r\nContent-Length: 4\r\n\r\n34", + wantOutput: "HTTP 1.1\r\nContent-Length: 100\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 100\r\n\r\n12HTTP 1.1\r\nContent-Length: 100\r\n\r\n34", + chunkSize: 999, + transform: Spec{[2]string{"4", "100"}}, + }, + // Multiple HTTP requests written in a single write with transform. A + // write will occur where it contains both the end of the previous HTTP + // request and the start of a new one. 
+ { + name: "multiple HTTP requests written in chunks", + input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 4\r\n\r\n12", + wantOutput: "HTTP 1.1\r\nContent-Length: 100\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 100\r\n\r\n12", + chunkSize: 3, + transform: Spec{[2]string{"4", "100"}}, }, } From 8ab72b7ab9cda6b2558e0429e5743eb1a34ef256 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Thu, 23 Feb 2023 11:04:31 -0500 Subject: [PATCH 24/44] Don't fail late when no QUIC version is selected --- psiphon/dialParameters.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/psiphon/dialParameters.go b/psiphon/dialParameters.go index 1b08e38e0..613d94f98 100644 --- a/psiphon/dialParameters.go +++ b/psiphon/dialParameters.go @@ -685,6 +685,15 @@ func MakeDialParameters( isFronted := protocol.TunnelProtocolUsesFrontedMeekQUIC(dialParams.TunnelProtocol) dialParams.QUICVersion = selectQUICVersion(isFronted, serverEntry, p) + // Due to potential tactics configurations, it may be that no QUIC + // version is selected. Abort immediately, with no error, as in the + // selectProtocol case. quic.Dial and quic.NewQUICTransporter will + // check for a missing QUIC version, but at that later stage an + // unnecessary failed_tunnel can be logged in this scenario. + if dialParams.QUICVersion == "" { + return nil, nil + } + if protocol.QUICVersionHasRandomizedClientHello(dialParams.QUICVersion) { dialParams.QUICClientHelloSeed, err = prng.NewSeed() if err != nil { From 8758db0e9c4bcce4e7fabc6e5ddfdf058d173b80 Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Wed, 22 Feb 2023 18:48:41 -0500 Subject: [PATCH 25/44] Fix: handle error in initial header write --- psiphon/common/transforms/httpTransformer.go | 123 ++++++++++++------ .../common/transforms/httpTransformer_test.go | 106 ++++++++++----- 2 files changed, 150 insertions(+), 79 deletions(-) diff --git a/psiphon/common/transforms/httpTransformer.go b/psiphon/common/transforms/httpTransformer.go index 8fb6cdbe9..5849ffe15 100644 --- a/psiphon/common/transforms/httpTransformer.go +++ b/psiphon/common/transforms/httpTransformer.go @@ -122,19 +122,29 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { } } if len(cl) == 0 { - // Either Content-Length header missing or Content-Length - // header value is empty, e.g. "Content-Length: ". - // b buffered in t.b + // Irrecoverable error because either Content-Length header + // missing, or Content-Length header value is empty, e.g. + // "Content-Length: ", and request body length cannot be + // determined. + // + // b buffered in t.b, return len(b) in an attempt to get + // through the current Write() sequence instead of getting + // stuck. return len(b), errors.TraceNew("Content-Length missing") } - n, err := strconv.ParseUint(string(cl), 10, 63) + contentLength, err := strconv.ParseUint(string(cl), 10, 63) if err != nil { - // b buffered in t.b + // Irrecoverable error because Content-Length is malformed and + // request body length cannot be determined. + // + // b buffered in t.b, return len(b) in an attempt to get + // through the current Write() sequence instead of getting + // stuck. 
return len(b), errors.Trace(err) } - t.remain = n + t.remain = contentLength // transform and write header @@ -144,7 +154,13 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { if t.transform != nil { newHeaderS, err := t.transform.Apply(t.seed, string(header)) if err != nil { - // b buffered in t.b + // TODO: consider logging an error and skiping transform + // instead of returning an error, if the transform is broken + // then all subsequent applications may fail. + // + // b buffered in t.b, return len(b) in an attempt to get + // through the current Write() sequence instead of getting + // stuck. return len(b), errors.Trace(err) } @@ -161,41 +177,45 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { } if math.MaxUint64-t.remain < uint64(len(header)) { - // b buffered in t.b + // Irrecoverable error because request is malformed: + // Content-Length + len(header) > math.MaxUint64. + // + // b buffered in t.b, return len(b) in an attempt to get + // through the current Write() sequence instead of getting + // stuck. return len(b), errors.TraceNew("t.remain + uint64(len(header)) overflows") } t.remain += uint64(len(header)) - err = t.writeBuffer() + n, err := t.writeBuffer() + + written := len(b) // all bytes of b buffered in t.b + + if n < len(header) || + len(t.b) > 0 && t.remain == 0 { + // All bytes of b were not written, but all bytes of b have been + // buffered in t.b. Drop 1 byte of b from t.b to pretend 1 byte + // of b was not written to trigger another Write() call. This + // handles the scenario where all request bytes have been + // received but writing to the underlying net.Conn fails and + // another Write() call cannot be expected unless a value + // less than len(b) is returned. An alternative solution would + // be to retry writes, or spawn a goroutine which writes t.b, + // but we want to return the error to the caller immediately so + // it can act accordingly. + written = len(b) - 1 + t.b = t.b[:len(t.b)-1] + } if t.remain > 0 { t.state = httpTransformerReadWriteBody - } else { - // Entire request, header and body, has been written. Return to - // waiting for next HTTP request header to arrive. - if len(t.b) > 0 { - // Return the number of bytes written to the underlying - // Conn and clear t.b instead of calling t.Write() with any - // remaining bytes of t.b. The caller must call Write() - // again with the unwritten, and unbuffered, bytes of b. - // Since t.remain = 0 it is guaranteed that - // len(b) - len(t.b) >= 0 because len(t.b) is the number of - // subsequent request bytes and len(b) is the number of - // trailing bytes of the current request plus the - // subsequent request bytes. - written := len(b) - len(t.b) - t.b = nil - return written, err - } } - if err != nil { - // b buffered in t.b - return len(b), err - } + return written, err } - // b buffered in t.b + // b buffered in t.b and the entire HTTP request header has not been + // recieved so another Write() call is expected. return len(b), nil } @@ -204,18 +224,19 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { // Must write buffered bytes first, in-order, to write bytes to underlying // Conn in the same order they were received in. 
- err := t.writeBuffer() + _, err := t.writeBuffer() if err != nil { // b not written or buffered return 0, errors.Trace(err) } - bytesToWrite := uint64(len(b)) - if bytesToWrite > t.remain { - bytesToWrite = t.remain + // Only write bytes of current request + writeN := uint64(len(b)) + if writeN > t.remain { + writeN = t.remain } - n, err := t.Conn.Write(b[:bytesToWrite]) + n, err := t.Conn.Write(b[:writeN]) // Do not need to check for underflow because n <= t.remain t.remain -= uint64(n) @@ -235,15 +256,29 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { return n, err } -func (t *HTTPTransformer) writeBuffer() error { +func (t *HTTPTransformer) writeBuffer() (written int, err error) { + + // Continue writing buffered bytes until either all buffered bytes have + // been written or all remaining bytes of the current HTTP request have + // been written. for len(t.b) > 0 && t.remain > 0 { - bytesToWrite := uint64(len(t.b)) - if bytesToWrite > t.remain { - bytesToWrite = t.remain + // Write all buffered bytes of the current request + writeN := uint64(len(t.b)) + if writeN > t.remain { + // t.b contains bytes of the next request(s), only write current + // request bytes. + writeN = t.remain + } + + // Check for potential overflow before Write() call + if math.MaxInt-written < int(writeN) { + return written, errors.TraceNew("written + bytesToWrite overflows") } - n, err := t.Conn.Write(t.b[:bytesToWrite]) + var n int + n, err = t.Conn.Write(t.b[:writeN]) + written += n // Do not need to check for underflow because n <= t.remain t.remain -= uint64(n) @@ -254,11 +289,13 @@ func (t *HTTPTransformer) writeBuffer() error { t.b = t.b[n:] } + // Stop writing and return if there was an error if err != nil { - return err + return } } - return nil + + return } func WrapDialerWithHTTPTransformer(dialer common.Dialer, params *HTTPTransformerParameters) common.Dialer { diff --git a/psiphon/common/transforms/httpTransformer_test.go b/psiphon/common/transforms/httpTransformer_test.go index 8c9293377..5c490f1a5 100644 --- a/psiphon/common/transforms/httpTransformer_test.go +++ b/psiphon/common/transforms/httpTransformer_test.go @@ -45,60 +45,80 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { chunkSize int transform Spec connWriteLimit int + connWriteLens []int connWriteErrs []error } tests := []test{ { - name: "no transform", - input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", - wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + name: "written in chunks", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", chunkSize: 1, }, { - name: "no transform with partial write and errors", - input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", - wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + name: "written in a single write", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + chunkSize: 999, + }, + { + name: "written in single write with error", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + chunkSize: 999, + connWriteErrs: []error{errors.New("err1")}, + }, + { + name: "written with partial write and errors", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", chunkSize: 1, connWriteLimit: 1, connWriteErrs: []error{errors.New("err1"), errors.New("err2")}, }, { name: "transform 
not applied to body", - input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", - wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", chunkSize: 1, transform: Spec{[2]string{"abcd", "efgh"}}, }, { name: "Content-Length missing", - input: "HTTP 1.1\r\n\r\nabcd", + input: "POST / HTTP/1.1\r\n\r\nabcd", wantError: errors.New("Content-Length missing"), chunkSize: 1, }, { name: "Content-Length overflow", - input: fmt.Sprintf("HTTP 1.1\r\nContent-Length: %d\r\n\r\nabcd", uint64(math.MaxUint64)), + input: fmt.Sprintf("POST / HTTP/1.1\r\nContent-Length: %d\r\n\r\nabcd", uint64(math.MaxUint64)), wantError: errors.New("strconv.ParseUint: parsing \"18446744073709551615\": value out of range"), chunkSize: 1, }, { - name: "no transform", - input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", - wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", + name: "incorrect Content-Length header value", + input: "POST / HTTP/1.1\r\nContent-Length: 3\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 3\r\n\r\nabc", chunkSize: 1, }, { - name: "incorrect Content-Length header value", - input: "HTTP 1.1\r\nContent-Length: 3\r\n\r\nabcd", - wantOutput: "HTTP 1.1\r\nContent-Length: 3\r\n\r\nabc", - chunkSize: 1, + name: "written in a single write with errors and partial writes", + input: "POST / HTTP/1.1\r\nHost: example.com\r\nContent-Length: 0\r\n\r\n", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 0\r\n\r\n", + chunkSize: 999, + transform: Spec{[2]string{"Host: example.com\r\n", ""}}, + connWriteErrs: []error{errors.New("err1"), nil, errors.New("err2"), nil, nil, errors.New("err3")}, + connWriteLens: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, }, { - name: "single HTTP request written in a single write", - input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", - wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcd", - chunkSize: 999, + name: "written in a single write with error and partial write", + input: "POST / HTTP/1.1\r\nHost: example.com\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + chunkSize: 999, + transform: Spec{[2]string{"Host: example.com\r\n", ""}}, + connWriteErrs: []error{errors.New("err1")}, + connWriteLens: []int{28}, // write lands mid "\r\n\r\n" }, { name: "transform", @@ -128,8 +148,8 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { // Multiple HTTP requests written in a single write. { name: "multiple HTTP requests written in a single write", - input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 2\r\n\r\n12", - wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 2\r\n\r\n12", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 2\r\n\r\n12", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 2\r\n\r\n12", chunkSize: 999, }, // Multiple HTTP requests written in a single write. A write will occur @@ -137,15 +157,15 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { // start of a new one. 
{ name: "multiple HTTP requests written in chunks", - input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 2\r\n\r\n12", - wantOutput: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 2\r\n\r\n12", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 2\r\n\r\n12", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 2\r\n\r\n12", chunkSize: 3, }, // Multiple HTTP requests written in a single write with transform. { - name: "multiple HTTP requests written in a single write", - input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 4\r\n\r\n12HTTP 1.1\r\nContent-Length: 4\r\n\r\n34", - wantOutput: "HTTP 1.1\r\nContent-Length: 100\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 100\r\n\r\n12HTTP 1.1\r\nContent-Length: 100\r\n\r\n34", + name: "multiple HTTP requests written in a single write with transform", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 4\r\n\r\n12POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\n34", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 100\r\n\r\n12POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\n34", chunkSize: 999, transform: Spec{[2]string{"4", "100"}}, }, @@ -153,10 +173,10 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { // write will occur where it contains both the end of the previous HTTP // request and the start of a new one. { - name: "multiple HTTP requests written in chunks", - input: "HTTP 1.1\r\nContent-Length: 4\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 4\r\n\r\n12", - wantOutput: "HTTP 1.1\r\nContent-Length: 100\r\n\r\nabcdHTTP 1.1\r\nContent-Length: 100\r\n\r\n12", - chunkSize: 3, + name: "multiple HTTP requests written in chunks with transform", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 4\r\n\r\n12", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 100\r\n\r\n12", + chunkSize: 4, // ensure one write contains bytes from both reqs transform: Spec{[2]string{"4", "100"}}, }, } @@ -171,6 +191,7 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { conn := testConn{ writeLimit: tt.connWriteLimit, + writeLens: tt.connWriteLens, writeErrs: tt.connWriteErrs, } @@ -354,6 +375,10 @@ type testConn struct { // writeLimit is the max number of bytes that will be written in a Write() // call. writeLimit int + // writeLens are returned from Write() calls in order and determine the + // max number of bytes that will be written. Overrides writeLimit if + // non-empty. If empty, then the value of writeLimit is returned. + writeLens []int // writeErrs are returned from Write() calls in order. If empty, then a nil // error is returned. writeErrs []error @@ -370,14 +395,23 @@ func (c *testConn) Write(b []byte) (n int, err error) { c.writeErrs = c.writeErrs[1:] } - if c.writeLimit != 0 && c.writeLimit < len(b) { + if len(c.writeLens) > 0 { + n = c.writeLens[0] + c.writeLens = c.writeLens[1:] + if len(b) <= n { + c.b = append(c.b, b...) + n = len(b) + } else { + c.b = append(c.b, b[:n]...) + } + } else if c.writeLimit != 0 && c.writeLimit < len(b) { c.b = append(c.b, b[:c.writeLimit]...) n = c.writeLimit - return + } else { + c.b = append(c.b, b...) + n = len(b) } - c.b = append(c.b, b...) 
- n = len(b) return } From f2e6233a5c402c47b76ce733707962646cd9d965 Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Sat, 25 Feb 2023 13:22:42 -0500 Subject: [PATCH 26/44] Revert: handle single req bytes per Write() call Partial revert of 8758db0e and b8e2e481 --- psiphon/common/transforms/httpTransformer.go | 250 ++++++++---------- .../common/transforms/httpTransformer_test.go | 205 +++++++++----- 2 files changed, 250 insertions(+), 205 deletions(-) diff --git a/psiphon/common/transforms/httpTransformer.go b/psiphon/common/transforms/httpTransformer.go index 5849ffe15..93dd6ca2e 100644 --- a/psiphon/common/transforms/httpTransformer.go +++ b/psiphon/common/transforms/httpTransformer.go @@ -51,9 +51,9 @@ type HTTPTransformerParameters struct { } const ( - // httpTransformerReadHeader HTTPTransformer is waiting to finish reading - // the next HTTP request header. - httpTransformerReadHeader = 0 + // httpTransformerReadWriteHeader HTTPTransformer is waiting to finish + // reading and writing the next HTTP request header. + httpTransformerReadWriteHeader = 0 // httpTransformerReadWriteBody HTTPTransformer is waiting to finish reading // and writing the current HTTP request body. httpTransformerReadWriteBody = 1 @@ -82,205 +82,171 @@ type HTTPTransformer struct { net.Conn } -// Warning: Does not handle chunked encoding. Must be called synchronously. +// Write implements the net.Conn interface. +// +// Note: it is assumed that the underlying transport, net.Conn, is a reliable +// stream transport, i.e. TCP, therefore it is required that the caller stop +// calling Write() on an instance of HTTPTransformer after an error is returned +// because, following this assumption, the connection will have failed when a +// Write() call to the underlying net.Conn fails; a new connection must be +// established, net.Conn, and wrapped with a new HTTPTransformer. For this +// reason, the return value may be the number of bytes buffered internally +// and not the number of bytes written to the underlying net.Conn when a non-nil +// error is returned. +// +// Warning: Does not handle chunked encoding and multiple HTTP requests written +// in a single Write(). Must be called synchronously. func (t *HTTPTransformer) Write(b []byte) (int, error) { - if t.state == httpTransformerReadHeader { + if t.state == httpTransformerReadWriteHeader { t.b = append(t.b, b...) // Wait until the entire HTTP request header has been read. Must check // all accumulated bytes incase the "\r\n\r\n" separator is written over - // multiple Write() calls; from reading the net/http code the entire - // HTTP request is written in a single Write() call. + // multiple Write() calls; from reading the go1.19.5 net/http code the + // entire HTTP request is written in a single Write() call. sep := []byte("\r\n\r\n") headerBodyLines := bytes.SplitN(t.b, sep, 2) // split header and body - if len(headerBodyLines) > 1 { + if len(headerBodyLines) <= 1 { + // b buffered in t.b and the entire HTTP request header has not been + // recieved so another Write() call is expected. + return len(b), nil + } // else: HTTP request header has been read - // read Content-Length before applying transform + // read Content-Length before applying transform - var headerLines [][]byte - - lines := bytes.Split(headerBodyLines[0], []byte("\r\n")) - if len(lines) > 1 { - // skip request line, e.g. 
"GET /foo HTTP/1.1" - headerLines = lines[1:] - } + var headerLines [][]byte - var cl []byte - contentLengthHeader := []byte("Content-Length:") + lines := bytes.Split(headerBodyLines[0], []byte("\r\n")) + if len(lines) > 1 { + // skip request line, e.g. "GET /foo HTTP/1.1" + headerLines = lines[1:] + } - for _, header := range headerLines { + var cl []byte + contentLengthHeader := []byte("Content-Length:") - if bytes.HasPrefix(header, contentLengthHeader) { + for _, header := range headerLines { - cl = textproto.TrimBytes(header[len(contentLengthHeader):]) - break - } - } - if len(cl) == 0 { - // Irrecoverable error because either Content-Length header - // missing, or Content-Length header value is empty, e.g. - // "Content-Length: ", and request body length cannot be - // determined. - // - // b buffered in t.b, return len(b) in an attempt to get - // through the current Write() sequence instead of getting - // stuck. - return len(b), errors.TraceNew("Content-Length missing") - } + if bytes.HasPrefix(header, contentLengthHeader) { - contentLength, err := strconv.ParseUint(string(cl), 10, 63) - if err != nil { - // Irrecoverable error because Content-Length is malformed and - // request body length cannot be determined. - // - // b buffered in t.b, return len(b) in an attempt to get - // through the current Write() sequence instead of getting - // stuck. - return len(b), errors.Trace(err) + cl = textproto.TrimBytes(header[len(contentLengthHeader):]) + break } + } + if len(cl) == 0 { + // Irrecoverable error because either Content-Length header + // missing, or Content-Length header value is empty, e.g. + // "Content-Length: ", and request body length cannot be + // determined. + return len(b), errors.TraceNew("Content-Length missing") + } - t.remain = contentLength + contentLength, err := strconv.ParseUint(string(cl), 10, 63) + if err != nil { + // Irrecoverable error because Content-Length is malformed and + // request body length cannot be determined. + return len(b), errors.Trace(err) + } - // transform and write header + t.remain = contentLength - headerLen := len(headerBodyLines[0]) + len(sep) - header := t.b[:headerLen] + // transform and write header - if t.transform != nil { - newHeaderS, err := t.transform.Apply(t.seed, string(header)) - if err != nil { - // TODO: consider logging an error and skiping transform - // instead of returning an error, if the transform is broken - // then all subsequent applications may fail. - // - // b buffered in t.b, return len(b) in an attempt to get - // through the current Write() sequence instead of getting - // stuck. - return len(b), errors.Trace(err) - } + headerLen := len(headerBodyLines[0]) + len(sep) + header := t.b[:headerLen] - newHeader := []byte(newHeaderS) + if t.transform != nil { + newHeaderS, err := t.transform.Apply(t.seed, string(header)) + if err != nil { + // TODO: consider logging an error and skiping transform + // instead of returning an error, if the transform is broken + // then all subsequent applications may fail. + return len(b), errors.Trace(err) + } - // only allocate new slice if header length changed - if len(newHeader) == len(header) { - copy(t.b[:len(header)], newHeader) - } else { - t.b = append(newHeader, t.b[len(header):]...) - } + newHeader := []byte(newHeaderS) - header = newHeader + // only allocate new slice if header length changed + if len(newHeader) == len(header) { + copy(t.b[:len(header)], newHeader) + } else { + t.b = append(newHeader, t.b[len(header):]...) 
} - if math.MaxUint64-t.remain < uint64(len(header)) { - // Irrecoverable error because request is malformed: - // Content-Length + len(header) > math.MaxUint64. - // - // b buffered in t.b, return len(b) in an attempt to get - // through the current Write() sequence instead of getting - // stuck. - return len(b), errors.TraceNew("t.remain + uint64(len(header)) overflows") - } - t.remain += uint64(len(header)) - - n, err := t.writeBuffer() - - written := len(b) // all bytes of b buffered in t.b - - if n < len(header) || - len(t.b) > 0 && t.remain == 0 { - // All bytes of b were not written, but all bytes of b have been - // buffered in t.b. Drop 1 byte of b from t.b to pretend 1 byte - // of b was not written to trigger another Write() call. This - // handles the scenario where all request bytes have been - // received but writing to the underlying net.Conn fails and - // another Write() call cannot be expected unless a value - // less than len(b) is returned. An alternative solution would - // be to retry writes, or spawn a goroutine which writes t.b, - // but we want to return the error to the caller immediately so - // it can act accordingly. - written = len(b) - 1 - t.b = t.b[:len(t.b)-1] - } + header = newHeader + } - if t.remain > 0 { - t.state = httpTransformerReadWriteBody - } + if math.MaxUint64-t.remain < uint64(len(header)) { + // Irrecoverable error because request is malformed: + // Content-Length + len(header) > math.MaxUint64. + return len(b), errors.TraceNew("t.remain + uint64(len(header)) overflows") + } + t.remain += uint64(len(header)) - return written, err + err = t.writeBuffer() + + if t.remain > 0 { + t.state = httpTransformerReadWriteBody } - // b buffered in t.b and the entire HTTP request header has not been - // recieved so another Write() call is expected. - return len(b), nil + return len(b), err } // HTTP request header has been transformed. Write any remaining bytes of // HTTP request header and then write HTTP request body. // Must write buffered bytes first, in-order, to write bytes to underlying - // Conn in the same order they were received in. - _, err := t.writeBuffer() + // net.Conn in the same order they were received in. + // + // In practise the buffer will be empty by this point because its entire + // contents will have been written in the first call to t.writeBuffer() + // when the header is received, parsed, and transformed; otherwise the + // underlying transport will have failed and the caller will not invoke + // Write() again on this instance. See HTTPTransformer.Write() comment. + err := t.writeBuffer() if err != nil { // b not written or buffered - return 0, errors.Trace(err) + return 0, err } - // Only write bytes of current request - writeN := uint64(len(b)) - if writeN > t.remain { - writeN = t.remain + if uint64(len(b)) > t.remain { + return len(b), errors.TraceNew("multiple HTTP requests in single Write() not supported") } - n, err := t.Conn.Write(b[:writeN]) + n, err := t.Conn.Write(b) - // Do not need to check for underflow because n <= t.remain t.remain -= uint64(n) if t.remain <= 0 { // Entire request, header and body, has been written. Return to // waiting for next HTTP request header to arrive. - // - // Return the number of bytes written to the underlying Conn instead of - // calling t.Write() with any remaining bytes of b which were not - // written or buffered, i.e. when n < len(b). The caller must call - // Write() again with the unwritten, and unbuffered, bytes of b. 
- t.state = httpTransformerReadHeader + t.state = httpTransformerReadWriteHeader t.remain = 0 } return n, err } -func (t *HTTPTransformer) writeBuffer() (written int, err error) { - - // Continue writing buffered bytes until either all buffered bytes have - // been written or all remaining bytes of the current HTTP request have - // been written. - for len(t.b) > 0 && t.remain > 0 { +func (t *HTTPTransformer) writeBuffer() error { - // Write all buffered bytes of the current request - writeN := uint64(len(t.b)) - if writeN > t.remain { - // t.b contains bytes of the next request(s), only write current - // request bytes. - writeN = t.remain - } + if uint64(len(t.b)) > t.remain { + // Should never happen, multiple requests written in a single + // Write() are not supported. + return errors.TraceNew("multiple HTTP requests in single Write() not supported") + } - // Check for potential overflow before Write() call - if math.MaxInt-written < int(writeN) { - return written, errors.TraceNew("written + bytesToWrite overflows") - } + // Continue to Write() buffered bytes to underlying net.Conn until Write() + // fails or all buffered bytes are written. + for len(t.b) > 0 { var n int - n, err = t.Conn.Write(t.b[:writeN]) - written += n + n, err := t.Conn.Write(t.b) - // Do not need to check for underflow because n <= t.remain t.remain -= uint64(n) if n == len(t.b) { @@ -291,11 +257,11 @@ func (t *HTTPTransformer) writeBuffer() (written int, err error) { // Stop writing and return if there was an error if err != nil { - return + return err } } - return + return nil } func WrapDialerWithHTTPTransformer(dialer common.Dialer, params *HTTPTransformerParameters) common.Dialer { diff --git a/psiphon/common/transforms/httpTransformer_test.go b/psiphon/common/transforms/httpTransformer_test.go index 5c490f1a5..3a64befa9 100644 --- a/psiphon/common/transforms/httpTransformer_test.go +++ b/psiphon/common/transforms/httpTransformer_test.go @@ -56,6 +56,20 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", chunkSize: 1, }, + { + name: "write header then body", // behaviour of net/http code + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + chunkSize: 38, + }, + { + name: "write header then body with error", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + chunkSize: 38, + connWriteErrs: []error{nil, errors.New("err1")}, + wantError: errors.New("err1"), + }, { name: "written in a single write", input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", @@ -68,6 +82,7 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", chunkSize: 999, connWriteErrs: []error{errors.New("err1")}, + wantError: errors.New("err1"), }, { name: "written with partial write and errors", @@ -75,7 +90,8 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", chunkSize: 1, connWriteLimit: 1, - connWriteErrs: []error{errors.New("err1"), errors.New("err2")}, + connWriteErrs: []error{nil, errors.New("err1"), errors.New("err2")}, + wantError: errors.New("err1"), }, { name: "transform not applied to body", @@ -102,24 +118,6 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { wantOutput: "POST / HTTP/1.1\r\nContent-Length: 3\r\n\r\nabc", chunkSize: 1, }, - { - name: "written in a single write 
with errors and partial writes", - input: "POST / HTTP/1.1\r\nHost: example.com\r\nContent-Length: 0\r\n\r\n", - wantOutput: "POST / HTTP/1.1\r\nContent-Length: 0\r\n\r\n", - chunkSize: 999, - transform: Spec{[2]string{"Host: example.com\r\n", ""}}, - connWriteErrs: []error{errors.New("err1"), nil, errors.New("err2"), nil, nil, errors.New("err3")}, - connWriteLens: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, - }, - { - name: "written in a single write with error and partial write", - input: "POST / HTTP/1.1\r\nHost: example.com\r\nContent-Length: 4\r\n\r\nabcd", - wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", - chunkSize: 999, - transform: Spec{[2]string{"Host: example.com\r\n", ""}}, - connWriteErrs: []error{errors.New("err1")}, - connWriteLens: []int{28}, // write lands mid "\r\n\r\n" - }, { name: "transform", input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", @@ -135,6 +133,7 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { transform: Spec{[2]string{"4", "100"}}, connWriteLimit: 1, connWriteErrs: []error{errors.New("err1"), errors.New("err2")}, + wantError: errors.New("err1"), }, { name: "transform with chunk write and errors in body write", @@ -144,13 +143,45 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { transform: Spec{[2]string{"4", "100"}}, connWriteLimit: 1, connWriteErrs: []error{errors.New("err1"), errors.New("err2"), errors.New("err3")}, + wantError: errors.New("err1"), + }, + // + // Below tests document unsupported behavior. + // + { + name: "written in a single write with errors and partial writes", + input: "POST / HTTP/1.1\r\nHost: example.com\r\nContent-Length: 0\r\n\r\n", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 0\r\n\r\n", + chunkSize: 999, + transform: Spec{[2]string{"Host: example.com\r\n", ""}}, + connWriteErrs: []error{errors.New("err1"), nil, errors.New("err2"), nil, nil, errors.New("err3")}, + connWriteLens: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + wantError: errors.New("err1"), + }, + { + name: "written in a single write with error and partial write", + input: "POST / HTTP/1.1\r\nHost: example.com\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + chunkSize: 999, + transform: Spec{[2]string{"Host: example.com\r\n", ""}}, + connWriteErrs: []error{errors.New("err1")}, + connWriteLens: []int{28}, // write lands mid "\r\n\r\n" + wantError: errors.New("err1"), }, - // Multiple HTTP requests written in a single write. { name: "multiple HTTP requests written in a single write", input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 2\r\n\r\n12", wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 2\r\n\r\n12", chunkSize: 999, + wantError: errors.New("multiple HTTP requests in single Write() not supported"), + }, + { + name: "multiple HTTP requests written in a single write with transform", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 4\r\n\r\n12POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\n34", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 100\r\n\r\n12POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\n34", + chunkSize: 999, + transform: Spec{[2]string{"4", "100"}}, + wantError: errors.New("multiple HTTP requests in single Write() not supported"), }, // Multiple HTTP requests written in a single write. 
A write will occur // where it contains both the end of the previous HTTP request and the @@ -159,15 +190,8 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { name: "multiple HTTP requests written in chunks", input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 2\r\n\r\n12", wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 2\r\n\r\n12", - chunkSize: 3, - }, - // Multiple HTTP requests written in a single write with transform. - { - name: "multiple HTTP requests written in a single write with transform", - input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 4\r\n\r\n12POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\n34", - wantOutput: "POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 100\r\n\r\n12POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\n34", - chunkSize: 999, - transform: Spec{[2]string{"4", "100"}}, + chunkSize: 4, + wantError: errors.New("multiple HTTP requests in single Write() not supported"), }, // Multiple HTTP requests written in a single write with transform. A // write will occur where it contains both the end of the previous HTTP @@ -178,6 +202,7 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { wantOutput: "POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\nabcdPOST / HTTP/1.1\r\nContent-Length: 100\r\n\r\n12", chunkSize: 4, // ensure one write contains bytes from both reqs transform: Spec{[2]string{"4", "100"}}, + wantError: errors.New("multiple HTTP requests in single Write() not supported"), }, } @@ -211,24 +236,19 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { } var b []byte - if len(remain) < tt.chunkSize { + if len(remain) < tt.chunkSize || tt.chunkSize == 0 { b = remain } else { b = remain[:tt.chunkSize] } - expectedErr := len(conn.writeErrs) > 0 - var n int n, err = transformer.Write(b) if err != nil { - if expectedErr { - // reset err - err = nil - } else { - // err checked outside loop - break - } + // The underlying transport will be a reliable stream + // transport, i.e. TCP, and we expect the caller to stop + // writing after an error is returned. + break } remain = remain[n:] @@ -255,10 +275,14 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { func TestHTTPTransformerHTTPServer(t *testing.T) { type test struct { - name string - request func(string) *http.Request - wantBody string - transform Spec + name string + request func(string) *http.Request + wantBody string + transform Spec + connWriteLimit int + connWriteLens []int + connWriteErrs []error + wantError error } tests := []test{ @@ -278,6 +302,26 @@ func TestHTTPTransformerHTTPServer(t *testing.T) { }, wantBody: "abc", }, + // Expect HTTP request to abort after a single Write() call on + // underlying net.Conn fails. 
+ { + name: "transport fails", + transform: Spec{[2]string{"", ""}}, + request: func(addr string) *http.Request { + + body := bytes.NewReader([]byte("abcd")) + + req, err := http.NewRequest("POST", "http://"+addr, body) + if err != nil { + panic(err) + } + + return req + }, + wantBody: "abc", + connWriteErrs: []error{errors.New("test error")}, + wantError: errors.New("test error"), + }, } for _, tt := range tests { @@ -295,7 +339,24 @@ func TestHTTPTransformerHTTPServer(t *testing.T) { } dialer := func(ctx context.Context, network, address string) (net.Conn, error) { - return net.Dial(network, address) + + if network != "tcp" { + return nil, errors.New("expected network tcp") + } + + conn, err := net.Dial(network, address) + if err != nil { + return nil, err + } + + wrappedConn := testConn{ + Conn: conn, + writeLimit: tt.connWriteLimit, + writeLens: tt.connWriteLens, + writeErrs: tt.connWriteErrs, + } + + return &wrappedConn, nil } httpTransport := &http.Transport{ @@ -309,7 +370,9 @@ func TestHTTPTransformerHTTPServer(t *testing.T) { serverReq := make(chan *serverRequest, 1) - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + mux := http.NewServeMux() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { b, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) @@ -324,7 +387,8 @@ func TestHTTPTransformerHTTPServer(t *testing.T) { }) s := &http.Server{ - Addr: "127.0.0.1:8080", + Addr: "127.0.0.1:8080", + Handler: mux, } go func() { @@ -346,18 +410,26 @@ func TestHTTPTransformerHTTPServer(t *testing.T) { t.Fatalf("s.Shutdown failed %v", shutdownErr) } - if err != nil { - t.Fatalf("client.Do failed %v", err) - } - - if resp.StatusCode != http.StatusOK { - t.Fatalf("expected 200 but got %d", resp.StatusCode) - } + if tt.wantError == nil { + if err != nil { + t.Fatalf("client.Do failed %v", err) + } + if resp.StatusCode != http.StatusOK { + t.Fatalf("expected 200 but got %d", resp.StatusCode) + } - r := <-serverReq + r := <-serverReq - if tt.wantBody != string(r.body) { - t.Fatalf("expected body %s but got %s", tt.wantBody, string(r.body)) + if tt.wantBody != string(r.body) { + t.Fatalf("expected body %s but got %s", tt.wantBody, string(r.body)) + } + } else { + // tt.wantError != nil + if err == nil { + t.Fatalf("expected error %v", tt.wantError) + } else if !strings.Contains(err.Error(), tt.wantError.Error()) { + t.Fatalf("expected error %v got %v", tt.wantError, err) + } } }) } @@ -382,10 +454,12 @@ type testConn struct { // writeErrs are returned from Write() calls in order. If empty, then a nil // error is returned. 
writeErrs []error + + net.Conn } func (c *testConn) Read(b []byte) (n int, err error) { - return 0, nil + return c.Conn.Read(b) } func (c *testConn) Write(b []byte) (n int, err error) { @@ -412,29 +486,34 @@ func (c *testConn) Write(b []byte) (n int, err error) { n = len(b) } + // Only write to net.Conn if set + if c.Conn != nil && n > 0 { + c.Conn.Write(b[:n]) + } + return } func (c *testConn) Close() error { - return nil + return c.Conn.Close() } func (c *testConn) LocalAddr() net.Addr { - return nil + return c.Conn.LocalAddr() } func (c *testConn) RemoteAddr() net.Addr { - return nil + return c.Conn.RemoteAddr() } func (c *testConn) SetDeadline(t time.Time) error { - return nil + return c.Conn.SetDeadline(t) } func (c *testConn) SetReadDeadline(t time.Time) error { - return nil + return c.Conn.SetReadDeadline(t) } func (c *testConn) SetWriteDeadline(t time.Time) error { - return nil + return c.Conn.SetWriteDeadline(t) } From 7d09cffe880313755f743cde0a38386e0a006a8a Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Mon, 27 Feb 2023 09:47:16 -0500 Subject: [PATCH 27/44] Fix comment --- psiphon/common/transforms/httpTransformer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psiphon/common/transforms/httpTransformer.go b/psiphon/common/transforms/httpTransformer.go index 93dd6ca2e..3c3e090aa 100644 --- a/psiphon/common/transforms/httpTransformer.go +++ b/psiphon/common/transforms/httpTransformer.go @@ -72,7 +72,7 @@ type HTTPTransformer struct { seed *prng.Seed // state is the HTTPTransformer state. Possible values are - // httpTransformerReadingHeader and httpTransformerReadingBody. + // httpTransformerReadWriteHeader and httpTransformerReadWriteBody. state int64 // b is the accumulated bytes of the current HTTP request. b []byte From b12159a3cfe69ce157d657e571ea2d02c2853c8c Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Mon, 27 Feb 2023 12:26:54 -0500 Subject: [PATCH 28/44] Add packet tunnel client-side transparent DNS rewriting Allows VPN clients to specify an arbitrary DNS server IP instead of requiring 10.0.0.2, which can conflict with some LAN private address spaces. --- .../PsiphonTunnel/PsiphonTunnel.h | 12 -- .../PsiphonTunnel/PsiphonTunnel.m | 10 -- MobileLibrary/psi/psi.go | 8 - psiphon/common/tun/tun.go | 160 ++++++++++++++++-- psiphon/common/tun/tun_test.go | 19 ++- psiphon/config.go | 12 ++ psiphon/controller.go | 9 +- 7 files changed, 174 insertions(+), 56 deletions(-) diff --git a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h index a0d90a2f0..3ff66d45a 100644 --- a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h +++ b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h @@ -408,18 +408,6 @@ Returns the path where the rotated notices file will be created. */ - (long)getPacketTunnelMTU; -/*! - Only valid in whole device mode. Provides the DNS resolver IP address that is provided by the packet tunnel to the device. - @return The IP address of the DNS resolver as a string. - */ -- (NSString * _Nonnull)getPacketTunnelDNSResolverIPv4Address; - -/*! - Only valid in whole device mode. Provides the DNS resolver IP address that is provided by the packet tunnel to the device. - @return The IP address of the DNS resolver as a string. - */ -- (NSString * _Nonnull)getPacketTunnelDNSResolverIPv6Address; - /*! Provides the tunnel-core build info json as a string. 
See the tunnel-core build info code for details https://github.com/Psiphon-Labs/psiphon-tunnel-core/blob/master/psiphon/common/buildinfo.go. @return The build info json as a string. diff --git a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m index 259a5da53..5701da248 100644 --- a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m +++ b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m @@ -457,16 +457,6 @@ - (long)getPacketTunnelMTU { return GoPsiGetPacketTunnelMTU(); } -// See comment in header. -- (NSString * _Nonnull)getPacketTunnelDNSResolverIPv4Address { - return GoPsiGetPacketTunnelDNSResolverIPv4Address(); -} - -// See comment in header. -- (NSString * _Nonnull)getPacketTunnelDNSResolverIPv6Address { - return GoPsiGetPacketTunnelDNSResolverIPv6Address(); -} - // See comment in header. + (NSString * _Nonnull)getBuildInfo { return GoPsiGetBuildInfo(); diff --git a/MobileLibrary/psi/psi.go b/MobileLibrary/psi/psi.go index b41752e98..896e96b54 100644 --- a/MobileLibrary/psi/psi.go +++ b/MobileLibrary/psi/psi.go @@ -481,14 +481,6 @@ func GetPacketTunnelMTU() int { return tun.DEFAULT_MTU } -func GetPacketTunnelDNSResolverIPv4Address() string { - return tun.GetTransparentDNSResolverIPv4Address().String() -} - -func GetPacketTunnelDNSResolverIPv6Address() string { - return tun.GetTransparentDNSResolverIPv6Address().String() -} - // WriteRuntimeProfiles writes Go runtime profile information to a set of // files in the specified output directory. See common.WriteRuntimeProfiles // for more details. diff --git a/psiphon/common/tun/tun.go b/psiphon/common/tun/tun.go index 43ea7d4c2..394a7cf23 100644 --- a/psiphon/common/tun/tun.go +++ b/psiphon/common/tun/tun.go @@ -87,7 +87,7 @@ the assigned IP address. The server also rewrites the destination address of certain DNS packets. The purpose of this is to allow clients to reconnect to different servers without having to tear down or change their local network configuration. Clients may configure their local tun device with an -arbitrary IP address and a static DNS resolver address. +arbitrary IP address and an arbitrary DNS resolver address. The server uses the 24-bit 10.0.0.0/8 IPv4 private address space to maximize the number of addresses available, due to Psiphon client churn and minimum @@ -849,6 +849,7 @@ func (server *Server) runDeviceDownstream() { if !processPacket( session.metrics, session, + nil, packetDirectionServerDownstream, readPacket) { // Packet is rejected and dropped. Reason will be counted in metrics. @@ -892,7 +893,7 @@ func (server *Server) runClientUpstream(session *session) { // processPacket transparently rewrites the source address to the // session's assigned address and rewrites the destination of any - // DNS packets destined to the target DNS resolver. + // DNS packets destined to the transparent DNS resolver. // // The first time the source address is rewritten, the original // value is recorded so inbound packets can have the reverse @@ -903,6 +904,7 @@ func (server *Server) runClientUpstream(session *session) { if !processPacket( session.metrics, session, + nil, packetDirectionServerUpstream, readPacket) { @@ -1101,18 +1103,6 @@ func (server *Server) convertIndexToIPv6Address(index int32) net.IP { index&0xFF)) } -// GetTransparentDNSResolverIPv4Address returns the static IPv4 address -// to use as a DNS resolver when transparent DNS rewriting is desired. 
-func GetTransparentDNSResolverIPv4Address() net.IP { - return transparentDNSResolverIPv4Address -} - -// GetTransparentDNSResolverIPv6Address returns the static IPv6 address -// to use as a DNS resolver when transparent DNS rewriting is desired. -func GetTransparentDNSResolverIPv6Address() net.IP { - return transparentDNSResolverIPv6Address -} - type session struct { // Note: 64-bit ints used with atomic operations are placed // at the start of struct to ensure 64-bit alignment. @@ -1861,7 +1851,7 @@ type ClientConfig struct { // transparent source IP address and DNS rewriting, the tun // device may have any assigned IP address, but should be // configured with the given MTU; and DNS should be configured - // to use the transparent DNS target resolver addresses. + // to use the specified transparent DNS resolver addresses. // Set TunFileDescriptor to <= 0 to ignore this parameter // and create and configure a tun device. TunFileDescriptor int @@ -1874,6 +1864,18 @@ type ClientConfig struct { // assign to a newly created tun device. IPv6AddressCIDR string + // TransparentDNSIPv4Address is the IPv4 address of the DNS server + // configured by a VPN using a packet tunnel. All DNS packets + // destined to this DNS server are transparently redirected to + // the Psiphon server DNS. + TransparentDNSIPv4Address string + + // TransparentDNSIPv4Address is the IPv6 address of the DNS server + // configured by a VPN using a packet tunnel. All DNS packets + // destined to this DNS server are transparently redirected to + // the Psiphon server DNS. + TransparentDNSIPv6Address string + // RouteDestinations are hosts (IPs) or networks (CIDRs) // to be configured to be routed through a newly // created tun device. @@ -1885,6 +1887,7 @@ type ClientConfig struct { // tunnel server via a transport channel. type Client struct { config *ClientConfig + transparentDNS *clientTransparentDNS device *Device channel *Channel upstreamPackets *PacketQueue @@ -1894,6 +1897,41 @@ type Client struct { workers *sync.WaitGroup } +// clientTransparentDNS caches the parsed representions of +// TransparentDNSIPv4/6Address for fast packet processing and rewriting. +type clientTransparentDNS struct { + IPv4Address net.IP + IPv6Address net.IP +} + +func newClientTransparentDNS( + IPv4Address, IPv6Address string) (*clientTransparentDNS, error) { + + var IPv4, IPv6 net.IP + + if IPv4Address != "" { + IPv4 = net.ParseIP(IPv4Address) + if IPv4 != nil { + IPv4 = IPv4.To4() + } + if IPv4 == nil { + return nil, errors.TraceNew("invalid IPv4 address") + } + } + + if IPv6Address != "" { + IPv6 = net.ParseIP(IPv6Address) + if IPv6 == nil || IPv6.To4() != nil { + return nil, errors.TraceNew("invalid IPv6 address") + } + } + + return &clientTransparentDNS{ + IPv4Address: IPv4, + IPv6Address: IPv6, + }, nil +} + // NewClient initializes a new Client. Unless using the // TunFileDescriptor configuration parameter, a new tun // device is created for the client. 
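For reference, a minimal client-side wiring sketch for these new fields (an editorial illustration, not part of the patch): it mirrors the tun.NewClient call in psiphon/controller.go later in this patch, borrows the illustrative addresses from the tun_test.go constants, and assumes logger, tunFD, and transport are host-supplied values.

    // Sketch only: the host VPN must also configure the device's DNS servers
    // to these same transparent DNS addresses, so that DNS packets reach the
    // tun device and are rewritten as described in the processPacket comments.
    packetTunnelClient, err := tun.NewClient(&tun.ClientConfig{
        Logger:                    logger,    // host-supplied logger (assumed)
        TunFileDescriptor:         tunFD,     // tun fd provided by the host VPN service (assumed)
        TransparentDNSIPv4Address: "172.16.0.2",
        TransparentDNSIPv6Address: "fd26:b6a6:4454:310a:0000:0000:0000:0002",
        Transport:                 transport, // e.g., a PacketTunnelTransport (assumed)
    })
    if err != nil {
        // handle error
    }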
@@ -1917,10 +1955,18 @@ func NewClient(config *ClientConfig) (*Client, error) { upstreamPacketQueueSize = config.UpstreamPacketQueueSize } + transparentDNS, err := newClientTransparentDNS( + config.TransparentDNSIPv4Address, + config.TransparentDNSIPv6Address) + if err != nil { + return nil, errors.Trace(err) + } + runContext, stopRunning := context.WithCancel(context.Background()) return &Client{ config: config, + transparentDNS: transparentDNS, device: device, channel: NewChannel(config.Transport, getMTU(config.MTU)), upstreamPackets: NewPacketQueue(upstreamPacketQueueSize), @@ -1965,6 +2011,7 @@ func (client *Client) Start() { if !processPacket( client.metrics, nil, + client.transparentDNS, packetDirectionClientUpstream, readPacket) { continue @@ -2029,6 +2076,7 @@ func (client *Client) Start() { if !processPacket( client.metrics, nil, + client.transparentDNS, packetDirectionClientDownstream, readPacket) { continue @@ -2280,6 +2328,7 @@ func getPacketDestinationIPAddress( func processPacket( metrics *packetMetrics, session *session, + clientTransparentDNS *clientTransparentDNS, direction packetDirection, packet []byte) bool { @@ -2438,6 +2487,27 @@ func processPacket( // - The traffic rules checks are bypassed, since transparent // DNS is essential + // Transparent DNS is a two-step translation. On the client, the VPN + // can be configured with any private address range, so as to not + // conflict with other local networks, such as WiFi. For example, the + // client may select from 192.168.0.0/16, when an existing interface + // uses a subnet in 10.0.0.0/8, and specify the VPN DNS server as 192.168.0.1. + // + // The first translation, on the client side, rewrites packets + // destined to 192.168.0.1:53, the DNS server, to the destination + // transparentDNSResolverIPv4Address:53. This packet is sent to the + // server. + // + // The second translation, on the server side, rewrites packets + // destined to transparentDNSResolverIPv4Address:53 to an actual DNS + // server destination. + // + // Then, reverse rewrites are applied to DNS response packets: the + // server rewrites the source address actual-DNS-server:53 to + // transparentDNSResolverIPv4Address:53, and then the client rewrites + // the source address transparentDNSResolverIPv4Address:53 to + // 192.168.0.1:53, and that packet is written to the tun device. + doTransparentDNS := false if isServer { @@ -2520,6 +2590,34 @@ func processPacket( } } } + + } else { // isClient + + if direction == packetDirectionClientUpstream { + + // DNS packets destined to the configured VPN DNS servers, + // specified in clientTransparentDNS, are rewritten to go to + // transparentDNSResolverIPv4/6Address. + + if destinationPort == portNumberDNS { + if (version == 4 && destinationIPAddress.Equal(clientTransparentDNS.IPv4Address)) || + (version == 6 && destinationIPAddress.Equal(clientTransparentDNS.IPv6Address)) { + doTransparentDNS = true + } + } + + } else { // packetDirectionClientDownstream + + // DNS packets with a transparentDNSResolverIPv4/6Address source + // address are rewritten to come from the configured VPN DNS servers. 
+ + if sourcePort == portNumberDNS { + if (version == 4 && sourceIPAddress.Equal(transparentDNSResolverIPv4Address)) || + (version == 6 && sourceIPAddress.Equal(transparentDNSResolverIPv6Address)) { + doTransparentDNS = true + } + } + } } // Apply rewrites before determining flow ID to ensure that corresponding up- @@ -2573,7 +2671,7 @@ func processPacket( if version == 4 { rewriteDestinationIPAddress = session.getOriginalIPv4Address() - } else { // version == 6 + } else if version == 6 { rewriteDestinationIPAddress = session.getOriginalIPv6Address() } @@ -2589,10 +2687,38 @@ func processPacket( if version == 4 { rewriteSourceIPAddress = transparentDNSResolverIPv4Address - } else { // version == 6 + } else if version == 6 { rewriteSourceIPAddress = transparentDNSResolverIPv6Address } } + + } else if direction == packetDirectionClientUpstream { + + // Rewrite the destination address to be + // transparentDNSResolverIPv4/6Address, which the server will + // subsequently send on to actual DNS servers. + + if doTransparentDNS { + + if version == 4 { + rewriteDestinationIPAddress = transparentDNSResolverIPv4Address + } else if version == 6 { + rewriteDestinationIPAddress = transparentDNSResolverIPv6Address + } + } + } else if direction == packetDirectionClientDownstream { + + // Rewrite the source address so the DNS response appears to come from + // the configured VPN DNS server. + + if doTransparentDNS { + + if version == 4 { + rewriteSourceIPAddress = clientTransparentDNS.IPv4Address + } else if version == 6 { + rewriteSourceIPAddress = clientTransparentDNS.IPv6Address + } + } } // Check if flow is tracked before checking traffic permission diff --git a/psiphon/common/tun/tun_test.go b/psiphon/common/tun/tun_test.go index da9bb96e5..efd8da2ae 100644 --- a/psiphon/common/tun/tun_test.go +++ b/psiphon/common/tun/tun_test.go @@ -110,7 +110,7 @@ func testTunneledTCP(t *testing.T, useIPv6 bool) { var flowCounter bytesTransferredCounter - flowActivityUpdaterMaker := func(_ string, IPAddress net.IP) []FlowActivityUpdater { + flowActivityUpdaterMaker := func(_ bool, _ string, IPAddress net.IP) []FlowActivityUpdater { if IPAddress.String() != testTCPServer.getListenerIPAddress() { t.Fatalf("unexpected flow IP address") @@ -485,6 +485,13 @@ type testClient struct { tunClient *Client } +const ( + clientIPv4AddressCIDR = "172.16.0.1/24" + clientIPv6AddressCIDR = "fd26:b6a6:4454:310a:0000:0000:0000:0001/64" + clientTransparentDNSIPv4Address = "172.16.0.2" + clientTransparentDNSIPv6Address = "fd26:b6a6:4454:310a:0000:0000:0000:0002" +) + func startTestClient( useIPv6 bool, MTU int, @@ -505,8 +512,10 @@ func startTestClient( Logger: logger, SudoNetworkConfigCommands: os.Getenv("TUN_TEST_SUDO") != "", AllowNoIPv6NetworkConfiguration: !useIPv6, - IPv4AddressCIDR: "172.16.0.1/24", - IPv6AddressCIDR: "fd26:b6a6:4454:310a:0000:0000:0000:0001/64", + IPv4AddressCIDR: clientIPv4AddressCIDR, + IPv6AddressCIDR: clientIPv6AddressCIDR, + TransparentDNSIPv4Address: clientTransparentDNSIPv4Address, + TransparentDNSIPv6Address: clientTransparentDNSIPv6Address, RouteDestinations: routeDestinations, Transport: unixConn, MTU: MTU, @@ -719,11 +728,11 @@ func testDNSClient(useIPv6 bool, tunDeviceName string) error { var sockAddr syscall.Sockaddr if !useIPv6 { - copy(ipv4[:], transparentDNSResolverIPv4Address) + copy(ipv4[:], net.ParseIP(clientTransparentDNSIPv4Address).To4()) domain = syscall.AF_INET sockAddr = &syscall.SockaddrInet4{Addr: ipv4, Port: portNumberDNS} } else { - copy(ipv6[:], transparentDNSResolverIPv6Address) 
+ copy(ipv6[:], net.ParseIP(clientTransparentDNSIPv6Address)) domain = syscall.AF_INET6 sockAddr = &syscall.SockaddrInet6{Addr: ipv6, Port: portNumberDNS} } diff --git a/psiphon/config.go b/psiphon/config.go index 5242e374f..76ec993b3 100755 --- a/psiphon/config.go +++ b/psiphon/config.go @@ -488,6 +488,18 @@ type Config struct { // set, TunnelPoolSize must be 1. PacketTunnelTunFileDescriptor int + // PacketTunnelTransparentDNSIPv4Address is the IPv4 address of the DNS + // server configured by a VPN using a packet tunnel. All DNS packets + // destined to this DNS server are transparently redirected to the + // Psiphon server DNS. + PacketTunnelTransparentDNSIPv4Address string + + // PacketTunnelTransparentDNSIPv6Address is the IPv6 address of the DNS + // server configured by a VPN using a packet tunnel. All DNS packets + // destined to this DNS server are transparently redirected to the + // Psiphon server DNS. + PacketTunnelTransparentDNSIPv6Address string + // SessionID specifies a client session ID to use in the Psiphon API. The // session ID should be a randomly generated value that is used only for a // single session, which is defined as the period between a user starting diff --git a/psiphon/controller.go b/psiphon/controller.go index 81f9edb8a..a12864182 100755 --- a/psiphon/controller.go +++ b/psiphon/controller.go @@ -180,9 +180,11 @@ func NewController(config *Config) (controller *Controller, err error) { packetTunnelTransport := NewPacketTunnelTransport() packetTunnelClient, err := tun.NewClient(&tun.ClientConfig{ - Logger: NoticeCommonLogger(), - TunFileDescriptor: config.PacketTunnelTunFileDescriptor, - Transport: packetTunnelTransport, + Logger: NoticeCommonLogger(), + TunFileDescriptor: config.PacketTunnelTunFileDescriptor, + TransparentDNSIPv4Address: config.PacketTunnelTransparentDNSIPv4Address, + TransparentDNSIPv6Address: config.PacketTunnelTransparentDNSIPv6Address, + Transport: packetTunnelTransport, }) if err != nil { return nil, errors.Trace(err) @@ -481,7 +483,6 @@ fetcherLoop: // // TODO: refactor upgrade downloader and remote server list fetcher to use // common code (including the resumable download routines). -// func (controller *Controller) upgradeDownloader() { defer controller.runWaitGroup.Done() From 441e8aed507eaea7052cb869e5fef10360749313 Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Mon, 27 Feb 2023 16:42:32 -0500 Subject: [PATCH 29/44] Changes based on feedback --- psiphon/common/resolver/resolver.go | 2 +- psiphon/common/transforms/httpTransformer.go | 80 ++++++++----------- .../common/transforms/httpTransformer_test.go | 8 +- psiphon/common/transforms/transforms.go | 60 ++++++++++---- psiphon/common/transforms/transforms_test.go | 6 +- 5 files changed, 86 insertions(+), 70 deletions(-) diff --git a/psiphon/common/resolver/resolver.go b/psiphon/common/resolver/resolver.go index b5c2b8117..6ca519bcd 100644 --- a/psiphon/common/resolver/resolver.go +++ b/psiphon/common/resolver/resolver.go @@ -1643,7 +1643,7 @@ func (conn *transformDNSPacketConn) Write(b []byte) (int, error) { // the network packet MTU. 
input := hex.EncodeToString(b) - output, err := conn.transform.Apply(conn.seed, input) + output, err := conn.transform.ApplyString(conn.seed, input) if err != nil { return 0, errors.Trace(err) } diff --git a/psiphon/common/transforms/httpTransformer.go b/psiphon/common/transforms/httpTransformer.go index 3c3e090aa..8d0b1d91d 100644 --- a/psiphon/common/transforms/httpTransformer.go +++ b/psiphon/common/transforms/httpTransformer.go @@ -74,8 +74,9 @@ type HTTPTransformer struct { // state is the HTTPTransformer state. Possible values are // httpTransformerReadWriteHeader and httpTransformerReadWriteBody. state int64 - // b is the accumulated bytes of the current HTTP request. - b []byte + // b is used to buffer the accumulated bytes of the current HTTP request + // header until the entire header is received and written. + b bytes.Buffer // remain is the number of remaining HTTP request body bytes to read into b. remain uint64 @@ -100,7 +101,8 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { if t.state == httpTransformerReadWriteHeader { - t.b = append(t.b, b...) + // Do not need to check return value https://github.com/golang/go/blob/1e9ff255a130200fcc4ec5e911d28181fce947d5/src/bytes/buffer.go#L164 + t.b.Write(b) // Wait until the entire HTTP request header has been read. Must check // all accumulated bytes incase the "\r\n\r\n" separator is written over @@ -109,7 +111,7 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { sep := []byte("\r\n\r\n") - headerBodyLines := bytes.SplitN(t.b, sep, 2) // split header and body + headerBodyLines := bytes.SplitN(t.b.Bytes(), sep, 2) // split header and body if len(headerBodyLines) <= 1 { // b buffered in t.b and the entire HTTP request header has not been @@ -158,10 +160,10 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { // transform and write header headerLen := len(headerBodyLines[0]) + len(sep) - header := t.b[:headerLen] + header := t.b.Bytes()[:headerLen] if t.transform != nil { - newHeaderS, err := t.transform.Apply(t.seed, string(header)) + newHeader, err := t.transform.Apply(t.seed, header) if err != nil { // TODO: consider logging an error and skiping transform // instead of returning an error, if the transform is broken @@ -169,13 +171,18 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { return len(b), errors.Trace(err) } - newHeader := []byte(newHeaderS) - // only allocate new slice if header length changed if len(newHeader) == len(header) { - copy(t.b[:len(header)], newHeader) + // Do not need to check return value. It is guaranteed that + // n == len(newHeader) because t.b.Len() >= n if the header + // size has not changed. + copy(t.b.Bytes()[:len(header)], newHeader) } else { - t.b = append(newHeader, t.b[len(header):]...) + b := t.b.Bytes() + t.b.Reset() + // Do not need to check return value of bytes.Buffer.Write() https://github.com/golang/go/blob/1e9ff255a130200fcc4ec5e911d28181fce947d5/src/bytes/buffer.go#L164 + t.b.Write(newHeader) + t.b.Write(b[len(header):]) } header = newHeader @@ -188,12 +195,20 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { } t.remain += uint64(len(header)) - err = t.writeBuffer() + if uint64(t.b.Len()) > t.remain { + // Should never happen, multiple requests written in a single + // Write() are not supported. 
+ return len(b), errors.TraceNew("multiple HTTP requests in single Write() not supported") + } + + n, err := t.b.WriteTo(t.Conn) + t.remain -= uint64(n) if t.remain > 0 { t.state = httpTransformerReadWriteBody } + // Do not wrap any I/O err returned by Conn return len(b), err } @@ -203,14 +218,20 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { // Must write buffered bytes first, in-order, to write bytes to underlying // net.Conn in the same order they were received in. // + // Already checked that t.b does not contain bytes of a subsequent HTTP + // request when the header is parsed, i.e. at this point it is guaranteed + // that t.b.Len() <= t.remain. + // // In practise the buffer will be empty by this point because its entire - // contents will have been written in the first call to t.writeBuffer() + // contents will have been written in the first call to t.b.WriteTo(t.Conn) // when the header is received, parsed, and transformed; otherwise the // underlying transport will have failed and the caller will not invoke // Write() again on this instance. See HTTPTransformer.Write() comment. - err := t.writeBuffer() + wrote, err := t.b.WriteTo(t.Conn) + t.remain -= uint64(wrote) if err != nil { // b not written or buffered + // Do not wrap any I/O err returned by Conn return 0, err } @@ -229,41 +250,10 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { t.remain = 0 } + // Do not wrap any I/O err returned by Conn return n, err } -func (t *HTTPTransformer) writeBuffer() error { - - if uint64(len(t.b)) > t.remain { - // Should never happen, multiple requests written in a single - // Write() are not supported. - return errors.TraceNew("multiple HTTP requests in single Write() not supported") - } - - // Continue to Write() buffered bytes to underlying net.Conn until Write() - // fails or all buffered bytes are written. 
- for len(t.b) > 0 { - - var n int - n, err := t.Conn.Write(t.b) - - t.remain -= uint64(n) - - if n == len(t.b) { - t.b = nil - } else { - t.b = t.b[n:] - } - - // Stop writing and return if there was an error - if err != nil { - return err - } - } - - return nil -} - func WrapDialerWithHTTPTransformer(dialer common.Dialer, params *HTTPTransformerParameters) common.Dialer { return func(ctx context.Context, network, addr string) (net.Conn, error) { conn, err := dialer(ctx, network, addr) diff --git a/psiphon/common/transforms/httpTransformer_test.go b/psiphon/common/transforms/httpTransformer_test.go index 3a64befa9..a356ac43d 100644 --- a/psiphon/common/transforms/httpTransformer_test.go +++ b/psiphon/common/transforms/httpTransformer_test.go @@ -90,7 +90,7 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { wantOutput: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", chunkSize: 1, connWriteLimit: 1, - connWriteErrs: []error{nil, errors.New("err1"), errors.New("err2")}, + connWriteErrs: []error{errors.New("err1"), errors.New("err2")}, wantError: errors.New("err1"), }, { @@ -257,6 +257,9 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { if err != nil { t.Fatalf("unexpected error %v", err) } + if string(conn.b) != tt.wantOutput { + t.Fatalf("expected \"%s\" of len %d but got \"%s\" of len %d", escapeNewlines(tt.wantOutput), len(tt.wantOutput), escapeNewlines(string(conn.b)), len(conn.b)) + } } else { // tt.wantError != nil if err == nil { @@ -265,9 +268,6 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { t.Fatalf("expected error %v got %v", tt.wantError, err) } } - if tt.wantError == nil && string(conn.b) != tt.wantOutput { - t.Fatalf("expected \"%s\" of len %d but got \"%s\" of len %d", escapeNewlines(tt.wantOutput), len(tt.wantOutput), escapeNewlines(string(conn.b)), len(conn.b)) - } }) } } diff --git a/psiphon/common/transforms/transforms.go b/psiphon/common/transforms/transforms.go index 0cc3249e3..c80008a76 100644 --- a/psiphon/common/transforms/transforms.go +++ b/psiphon/common/transforms/transforms.go @@ -61,7 +61,7 @@ func (specs Specs) Validate() error { } for _, spec := range specs { // Call Apply to compile/validate the regular expressions and generators. - _, err := spec.Apply(seed, "") + _, err := spec.ApplyString(seed, "") if err != nil { return errors.Trace(err) } @@ -142,30 +142,56 @@ func (specs Specs) Select(scope string, scopedSpecs ScopedSpecNames) (string, Sp return specName, spec } -// Apply applies the Spec to the input string, producing the output string. +// ApplyString applies the Spec to the input string, producing the output string. // // The input seed is used for all random generation. The same seed can be // supplied to produce the same output, for replay. -func (spec Spec) Apply(seed *prng.Seed, input string) (string, error) { - - // TODO: the compiled regexp and regen could be cached, but the seed is an - // issue with caching the regen. +func (spec Spec) ApplyString(seed *prng.Seed, input string) (string, error) { value := input for _, transform := range spec { - args := ®en.GeneratorArgs{ - RngSource: prng.NewPRNGWithSeed(seed), - Flags: syntax.OneLine | syntax.NonGreedy, - } - rg, err := regen.NewGenerator(transform[1], args) - if err != nil { - panic(err.Error()) - } - replacement := rg.Generate() - - re := regexp.MustCompile(transform[0]) + re, replacement := makeRegexAndRepl(seed, transform) value = re.ReplaceAllString(value, replacement) } return value, nil } + +// Apply applies the Spec to the input bytes, producing the output bytes. 
+// +// The input seed is used for all random generation. The same seed can be +// supplied to produce the same output, for replay. +func (spec Spec) Apply(seed *prng.Seed, input []byte) ([]byte, error) { + + value := input + for _, transform := range spec { + + re, replacement := makeRegexAndRepl(seed, transform) + value = re.ReplaceAll(value, []byte(replacement)) + } + return value, nil +} + +// makeRegexAndRepl generates the regex and replacement for a given seed and +// transform. The same seed can be supplied to produce the same output, for +// replay. +func makeRegexAndRepl(seed *prng.Seed, transform [2]string) (re *regexp.Regexp, replacement string) { + + // TODO: the compiled regexp and regen could be cached, but the seed is an + // issue with caching the regen. + + args := ®en.GeneratorArgs{ + RngSource: prng.NewPRNGWithSeed(seed), + Flags: syntax.OneLine | syntax.NonGreedy, + } + rg, err := regen.NewGenerator(transform[1], args) + if err != nil { + panic(err.Error()) + } + + replacement = rg.Generate() + + re = regexp.MustCompile(transform[0]) + + return +} diff --git a/psiphon/common/transforms/transforms_test.go b/psiphon/common/transforms/transforms_test.go index d5b0ced50..838ab4c72 100644 --- a/psiphon/common/transforms/transforms_test.go +++ b/psiphon/common/transforms/transforms_test.go @@ -87,7 +87,7 @@ func runTestTransforms() error { } input := "aa0aa0aa0bb0aa0bb0aa0bb0aa0bb0aa0bb0aa0bb0aa0bb0aa0bb0aa0bb0aa" - output, err := spec.Apply(seed, input) + output, err := spec.ApplyString(seed, input) if err != nil { return errors.Trace(err) } @@ -102,7 +102,7 @@ func runTestTransforms() error { previousOutput := output - output, err = spec.Apply(seed, input) + output, err = spec.ApplyString(seed, input) if err != nil { return errors.Trace(err) } @@ -121,7 +121,7 @@ func runTestTransforms() error { return errors.Trace(err) } - output, err = spec.Apply(seed, input) + output, err = spec.ApplyString(seed, input) if err != nil { return errors.Trace(err) } From 5fbe7d526e99c5e082904067cedb1dcdd6e29ea0 Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Tue, 28 Feb 2023 09:56:59 -0500 Subject: [PATCH 30/44] Fix comment --- psiphon/common/transforms/httpTransformer.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/psiphon/common/transforms/httpTransformer.go b/psiphon/common/transforms/httpTransformer.go index 8d0b1d91d..ea3ccfd90 100644 --- a/psiphon/common/transforms/httpTransformer.go +++ b/psiphon/common/transforms/httpTransformer.go @@ -77,7 +77,10 @@ type HTTPTransformer struct { // b is used to buffer the accumulated bytes of the current HTTP request // header until the entire header is received and written. b bytes.Buffer - // remain is the number of remaining HTTP request body bytes to read into b. + // remain is the number of remaining HTTP request bytes to write to the + // underlying net.Conn. Set to the value of Content-Length (HTTP request + // body bytes) plus the length of the transformed HTTP header once the + // current request header is received. 
remain uint64 net.Conn From e9383addc16d109b8d72fafeea421b5df080e912 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Tue, 28 Feb 2023 11:05:19 -0500 Subject: [PATCH 31/44] Fix: missing test case --- psiphon/common/parameters/parameters_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/psiphon/common/parameters/parameters_test.go b/psiphon/common/parameters/parameters_test.go index ef93e4938..707db094a 100755 --- a/psiphon/common/parameters/parameters_test.go +++ b/psiphon/common/parameters/parameters_test.go @@ -177,6 +177,13 @@ func TestGetDefaultParameters(t *testing.T) { if !reflect.DeepEqual(v, g) { t.Fatalf("ProtocolTransformScopedSpecNames returned %+v expected %+v", g, v) } + case protocol.LabeledTunnelProtocols: + for label, protocols := range v { + g := p.Get().LabeledTunnelProtocols(name, label) + if !reflect.DeepEqual(protocols, g) { + t.Fatalf("LabeledTunnelProtocols returned %+v expected %+v", g, protocols) + } + } default: t.Fatalf("Unhandled default type: %s (%T)", name, defaults.value) } From f7fcf4d09d2d7c5d67a3719017dba8d5cfcbabfe Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Tue, 28 Feb 2023 11:27:01 -0500 Subject: [PATCH 32/44] Fix: do not hardcode test port --- psiphon/common/transforms/httpTransformer_test.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/psiphon/common/transforms/httpTransformer_test.go b/psiphon/common/transforms/httpTransformer_test.go index a356ac43d..7fad1c3af 100644 --- a/psiphon/common/transforms/httpTransformer_test.go +++ b/psiphon/common/transforms/httpTransformer_test.go @@ -386,13 +386,18 @@ func TestHTTPTransformerHTTPServer(t *testing.T) { }() }) + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed %v", err) + } + s := &http.Server{ - Addr: "127.0.0.1:8080", + Addr: listener.Addr().String(), Handler: mux, } go func() { - s.ListenAndServe() + s.Serve(listener) }() client := http.Client{ From 1aaaa2b407e95cbe8f1e8cc8cc467f664fa6a3b3 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Tue, 28 Feb 2023 11:57:25 -0500 Subject: [PATCH 33/44] Fix: broken/missing test case --- psiphon/server/psinet/psinet_test.go | 29 ++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/psiphon/server/psinet/psinet_test.go b/psiphon/server/psinet/psinet_test.go index 5348fb367..3c4ce24a3 100644 --- a/psiphon/server/psinet/psinet_test.go +++ b/psiphon/server/psinet/psinet_test.go @@ -46,11 +46,11 @@ func TestDatabase(t *testing.T) { "home_pages" : { "CLIENT-REGION" : [{ "region" : "CLIENT-REGION", - "url" : "HOME-PAGE-URL?client_region=XX" + "url" : "HOME-PAGE-URL?client_region=XX&device_region=XX" }], "None" : [{ "region" : "None", - "url" : "DEFAULT-HOME-PAGE-URL?client_region=XX" + "url" : "DEFAULT-HOME-PAGE-URL?client_region=XX&device_region=XX" }] }, "mobile_home_pages": { @@ -64,7 +64,7 @@ func TestDatabase(t *testing.T) { }] }, "alert_action_urls" : { - "ALERT-REASON-1" : ["SPONSOR-ALERT-1-ACTION-URL?client_region=XX"] + "ALERT-REASON-1" : ["SPONSOR-ALERT-1-ACTION-URL?client_region=XX&device_region=XX"] }, "https_request_regexes" : [{ "regex" : "REGEX-VALUE", @@ -119,22 +119,23 @@ func TestDatabase(t *testing.T) { sponsorID string clientRegion string clientASN string + deviceRegion string isMobile bool expectedURL string }{ - {"SPONSOR-ID", "CLIENT-REGION", "65535", false, "HOME-PAGE-URL?client_region=CLIENT-REGION"}, - {"SPONSOR-ID", "UNCONFIGURED-CLIENT-REGION", "65535", false, 
"DEFAULT-HOME-PAGE-URL?client_region=UNCONFIGURED-CLIENT-REGION"}, - {"SPONSOR-ID", "CLIENT-REGION", "65535", true, "MOBILE-HOME-PAGE-URL?client_region=CLIENT-REGION&client_asn=65535"}, - {"SPONSOR-ID", "UNCONFIGURED-CLIENT-REGION", "65535", true, "DEFAULT-MOBILE-HOME-PAGE-URL?client_region=UNCONFIGURED-CLIENT-REGION&client_asn=65535"}, - {"UNCONFIGURED-SPONSOR-ID", "CLIENT-REGION", "65535", false, "HOME-PAGE-URL?client_region=CLIENT-REGION"}, - {"UNCONFIGURED-SPONSOR-ID", "UNCONFIGURED-CLIENT-REGION", "65535", false, "DEFAULT-HOME-PAGE-URL?client_region=UNCONFIGURED-CLIENT-REGION"}, - {"UNCONFIGURED-SPONSOR-ID", "CLIENT-REGION", "65535", true, "MOBILE-HOME-PAGE-URL?client_region=CLIENT-REGION&client_asn=65535"}, - {"UNCONFIGURED-SPONSOR-ID", "UNCONFIGURED-CLIENT-REGION", "65535", true, "DEFAULT-MOBILE-HOME-PAGE-URL?client_region=UNCONFIGURED-CLIENT-REGION&client_asn=65535"}, + {"SPONSOR-ID", "CLIENT-REGION", "65535", "DEVICE-REGION", false, "HOME-PAGE-URL?client_region=CLIENT-REGION&device_region=DEVICE-REGION"}, + {"SPONSOR-ID", "UNCONFIGURED-CLIENT-REGION", "65535", "DEVICE-REGION", false, "DEFAULT-HOME-PAGE-URL?client_region=UNCONFIGURED-CLIENT-REGION&device_region=DEVICE-REGION"}, + {"SPONSOR-ID", "CLIENT-REGION", "65535", "DEVICE-REGION", true, "MOBILE-HOME-PAGE-URL?client_region=CLIENT-REGION&client_asn=65535"}, + {"SPONSOR-ID", "UNCONFIGURED-CLIENT-REGION", "65535", "DEVICE-REGION", true, "DEFAULT-MOBILE-HOME-PAGE-URL?client_region=UNCONFIGURED-CLIENT-REGION&client_asn=65535"}, + {"UNCONFIGURED-SPONSOR-ID", "CLIENT-REGION", "65535", "DEVICE-REGION", false, "HOME-PAGE-URL?client_region=CLIENT-REGION&device_region=DEVICE-REGION"}, + {"UNCONFIGURED-SPONSOR-ID", "UNCONFIGURED-CLIENT-REGION", "65535", "DEVICE-REGION", false, "DEFAULT-HOME-PAGE-URL?client_region=UNCONFIGURED-CLIENT-REGION&device_region=DEVICE-REGION"}, + {"UNCONFIGURED-SPONSOR-ID", "CLIENT-REGION", "65535", "DEVICE-REGION", true, "MOBILE-HOME-PAGE-URL?client_region=CLIENT-REGION&client_asn=65535"}, + {"UNCONFIGURED-SPONSOR-ID", "UNCONFIGURED-CLIENT-REGION", "65535", "DEVICE-REGION", true, "DEFAULT-MOBILE-HOME-PAGE-URL?client_region=UNCONFIGURED-CLIENT-REGION&client_asn=65535"}, } for _, testCase := range homePageTestCases { t.Run(fmt.Sprintf("%+v", testCase), func(t *testing.T) { - homepages := db.GetHomepages(testCase.sponsorID, testCase.clientRegion, testCase.clientASN, testCase.isMobile) + homepages := db.GetHomepages(testCase.sponsorID, testCase.clientRegion, testCase.clientASN, testCase.deviceRegion, testCase.isMobile) if len(homepages) != 1 || homepages[0] != testCase.expectedURL { t.Fatalf("unexpected home page: %+v", homepages) } @@ -147,7 +148,7 @@ func TestDatabase(t *testing.T) { expectedURLCount int expectedURL string }{ - {"ALERT-REASON-1", "SPONSOR-ID", 1, "SPONSOR-ALERT-1-ACTION-URL?client_region=CLIENT-REGION"}, + {"ALERT-REASON-1", "SPONSOR-ID", 1, "SPONSOR-ALERT-1-ACTION-URL?client_region=CLIENT-REGION&device_region=DEVICE-REGION"}, {"ALERT-REASON-1", "UNCONFIGURED-SPONSOR-ID", 1, "DEFAULT-ALERT-1-ACTION-URL?client_region=CLIENT-REGION"}, {"ALERT-REASON-2", "SPONSOR-ID", 1, "DEFAULT-ALERT-2-ACTION-URL?client_region=CLIENT-REGION"}, {"ALERT-REASON-2", "UNCONFIGURED-SPONSOR-ID", 1, "DEFAULT-ALERT-2-ACTION-URL?client_region=CLIENT-REGION"}, @@ -156,7 +157,7 @@ func TestDatabase(t *testing.T) { for _, testCase := range alertActionURLTestCases { t.Run(fmt.Sprintf("%+v", testCase), func(t *testing.T) { - URLs := db.GetAlertActionURLs(testCase.alertReason, testCase.sponsorID, "CLIENT-REGION", "") + URLs := 
db.GetAlertActionURLs(testCase.alertReason, testCase.sponsorID, "CLIENT-REGION", "", "DEVICE-REGION") if len(URLs) != testCase.expectedURLCount || (len(URLs) > 0 && URLs[0] != testCase.expectedURL) { t.Fatalf("unexpected URLs: %d %+v, %+v", testCase.expectedURLCount, testCase.expectedURL, URLs) } From cbef418ecfd99378fe6b4284c707305e2680a7a6 Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Wed, 1 Mar 2023 14:15:10 -0500 Subject: [PATCH 34/44] Add DisableHTTPTransforms --- psiphon/common/protocol/serverEntry.go | 1 + psiphon/dialParameters.go | 27 ++++++++++++++++---------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/psiphon/common/protocol/serverEntry.go b/psiphon/common/protocol/serverEntry.go index e730021eb..b3c3d4afd 100644 --- a/psiphon/common/protocol/serverEntry.go +++ b/psiphon/common/protocol/serverEntry.go @@ -75,6 +75,7 @@ type ServerEntry struct { TacticsRequestObfuscatedKey string `json:"tacticsRequestObfuscatedKey"` ConfigurationVersion int `json:"configurationVersion"` Signature string `json:"signature"` + DisableHTTPTransforms bool `json:"disableHTTPTransforms"` // These local fields are not expected to be present in downloaded server // entries. They are added by the client to record and report stats about diff --git a/psiphon/dialParameters.go b/psiphon/dialParameters.go index d2843d8f9..4ccd113f4 100644 --- a/psiphon/dialParameters.go +++ b/psiphon/dialParameters.go @@ -776,19 +776,26 @@ func MakeDialParameters( } - if (!isReplay || !replayHTTPTransformerParameters) && protocol.TunnelProtocolUsesMeekHTTP(dialParams.TunnelProtocol) { + if protocol.TunnelProtocolUsesMeekHTTP(dialParams.TunnelProtocol) { - isFronted := protocol.TunnelProtocolUsesFrontedMeek(dialParams.TunnelProtocol) + if serverEntry.DisableHTTPTransforms { - params, err := makeHTTPTransformerParameters(config.GetParameters().Get(), serverEntry.FrontingProviderID, isFronted) - if err != nil { - return nil, errors.Trace(err) - } - - if params.ProtocolTransformSpec != nil { - dialParams.HTTPTransformerParameters = params - } else { dialParams.HTTPTransformerParameters = nil + + } else if !isReplay || !replayHTTPTransformerParameters { + + isFronted := protocol.TunnelProtocolUsesFrontedMeek(dialParams.TunnelProtocol) + + params, err := makeHTTPTransformerParameters(config.GetParameters().Get(), serverEntry.FrontingProviderID, isFronted) + if err != nil { + return nil, errors.Trace(err) + } + + if params.ProtocolTransformSpec != nil { + dialParams.HTTPTransformerParameters = params + } else { + dialParams.HTTPTransformerParameters = nil + } } } From 9e3e8d6c1af9a117f00fb5d700c44d6e8f34b9e7 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Wed, 1 Mar 2023 15:21:19 -0500 Subject: [PATCH 35/44] Fix: revert hard-coded Android buildrev --- MobileLibrary/Android/make.bash | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MobileLibrary/Android/make.bash b/MobileLibrary/Android/make.bash index a755ee09d..5233fc13f 100755 --- a/MobileLibrary/Android/make.bash +++ b/MobileLibrary/Android/make.bash @@ -18,7 +18,7 @@ export GOCACHE=/tmp BUILDINFOFILE="psiphon-tunnel-core_buildinfo.txt" BUILDDATE=$(date --iso-8601=seconds) BUILDREPO="https://github.com/Psiphon-Labs/psiphon-tunnel-core.git" -BUILDREV="5641695f" +BUILDREV=$(git rev-parse --short HEAD) GOVERSION=$(go version | perl -ne '/go version (.*?) 
/ && print $1') LDFLAGS="\ From 8ae97c76fc0ed63dc09e952a24d7f9abe9902c08 Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Wed, 1 Mar 2023 15:59:22 -0500 Subject: [PATCH 36/44] Fix: copy req body bytes --- psiphon/common/transforms/httpTransformer.go | 19 ++++++++++++++++--- .../common/transforms/httpTransformer_test.go | 14 ++++++++++++++ 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/psiphon/common/transforms/httpTransformer.go b/psiphon/common/transforms/httpTransformer.go index ea3ccfd90..101647cf6 100644 --- a/psiphon/common/transforms/httpTransformer.go +++ b/psiphon/common/transforms/httpTransformer.go @@ -179,13 +179,26 @@ func (t *HTTPTransformer) Write(b []byte) (int, error) { // Do not need to check return value. It is guaranteed that // n == len(newHeader) because t.b.Len() >= n if the header // size has not changed. - copy(t.b.Bytes()[:len(header)], newHeader) + copy(t.b.Bytes()[:headerLen], newHeader) } else { - b := t.b.Bytes() + + // Copy any request body bytes received before resetting the + // buffer. + var reqBody []byte + reqBodyLen := t.b.Len() - headerLen // number of request body bytes received + if reqBodyLen > 0 { + reqBody = make([]byte, reqBodyLen) + copy(reqBody, t.b.Bytes()[headerLen:]) + } + + // Reset the buffer and write transformed header and any + // request body bytes received into it. t.b.Reset() // Do not need to check return value of bytes.Buffer.Write() https://github.com/golang/go/blob/1e9ff255a130200fcc4ec5e911d28181fce947d5/src/bytes/buffer.go#L164 t.b.Write(newHeader) - t.b.Write(b[len(header):]) + if len(reqBody) > 0 { + t.b.Write(reqBody) + } } header = newHeader diff --git a/psiphon/common/transforms/httpTransformer_test.go b/psiphon/common/transforms/httpTransformer_test.go index 7fad1c3af..8e496ddb3 100644 --- a/psiphon/common/transforms/httpTransformer_test.go +++ b/psiphon/common/transforms/httpTransformer_test.go @@ -125,6 +125,20 @@ func TestHTTPTransformerHTTPRequest(t *testing.T) { chunkSize: 1, transform: Spec{[2]string{"4", "100"}}, }, + { + name: "transform with separate write for header and body", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\nabcd", + chunkSize: 38, // length of header + transform: Spec{[2]string{"4", "100"}}, + }, + { + name: "transform with single write", + input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", + wantOutput: "POST / HTTP/1.1\r\nContent-Length: 100\r\n\r\nabcd", + chunkSize: 999, + transform: Spec{[2]string{"4", "100"}}, + }, { name: "transform with partial write and errors in header write", input: "POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\nabcd", From 821be15086ec9f21e3033c0a95ff476a784d70ed Mon Sep 17 00:00:00 2001 From: mirokuratczyk Date: Tue, 7 Mar 2023 15:19:00 -0500 Subject: [PATCH 37/44] Fix: http_transform not logged --- psiphon/serverApi.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/psiphon/serverApi.go b/psiphon/serverApi.go index 1bbc65268..d864baf8b 100644 --- a/psiphon/serverApi.go +++ b/psiphon/serverApi.go @@ -1104,17 +1104,17 @@ func getBaseAPIParameters( params["dns_transform"] = dialParams.ResolveParameters.ProtocolTransformName } - if dialParams.HTTPTransformerParameters != nil { - if dialParams.HTTPTransformerParameters.ProtocolTransformSpec != nil { - params["http_transform"] = dialParams.HTTPTransformerParameters.ProtocolTransformName - } - } - params["dns_attempt"] = strconv.Itoa( dialParams.ResolveParameters.GetFirstAttemptWithAnswer()) } } 
+ if dialParams.HTTPTransformerParameters != nil { + if dialParams.HTTPTransformerParameters.ProtocolTransformSpec != nil { + params["http_transform"] = dialParams.HTTPTransformerParameters.ProtocolTransformName + } + } + if dialParams.DialConnMetrics != nil { metrics := dialParams.DialConnMetrics.GetMetrics() for name, value := range metrics { From d658fd2f6a8dca6bc3c29edce35b40630397d94b Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Fri, 17 Mar 2023 08:48:27 -0400 Subject: [PATCH 38/44] Fix comments --- psiphon/config.go | 1 - psiphon/dialParameters.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/psiphon/config.go b/psiphon/config.go index 19bf543e0..730fbb879 100755 --- a/psiphon/config.go +++ b/psiphon/config.go @@ -177,7 +177,6 @@ type Config struct { // "UNFRONTED-MEEK-HTTPS-OSSH", "UNFRONTED-MEEK-SESSION-TICKET-OSSH", // "FRONTED-MEEK-OSSH", "FRONTED-MEEK-HTTP-OSSH", "QUIC-OSSH", // "FRONTED-MEEK-QUIC-OSSH", "TAPDANCE-OSSH", and "CONJURE-OSSH". - // For the default, an empty list, all protocols are used. LimitTunnelProtocols []string diff --git a/psiphon/dialParameters.go b/psiphon/dialParameters.go index 4ccd113f4..395de022a 100644 --- a/psiphon/dialParameters.go +++ b/psiphon/dialParameters.go @@ -330,7 +330,7 @@ func MakeDialParameters( // Initialize dial parameters. // // When not replaying, all required parameters are initialized. When - // replaying, existing parameters are retaing, subject to the replay-X + // replaying, existing parameters are retained, subject to the replay-X // tactics flags. // Select a network latency multiplier for this dial. This allows clients to From ceb7b5400a9e876c4e5bb2b4aee1818e66b2960c Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Mon, 20 Mar 2023 11:35:05 -0400 Subject: [PATCH 39/44] Fix startSendFeedback crash - mPsiphonTunnel is null - For getNetworkID, use isVpnMode = false --- .../Android/PsiphonTunnel/PsiphonTunnel.java | 32 ++++++++++++++++++- psiphon/feedback.go | 19 +++++++++-- 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index 1adfdfa01..88b091a71 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -429,7 +429,37 @@ public long hasNetworkConnectivity() { @Override public String getNetworkID() { - return PsiphonTunnel.getNetworkID(context, mPsiphonTunnel.isVpnMode()); + + // startSendFeedback is invoked from the Psiphon UI process, not the Psiphon + // VPN process. + // + // Case 1: no VPN is running + // + // isVpnMode = true/false doesn't change the network ID; the network ID will + // be the physical network ID, and feedback may load load tactics and may + // fetch tactics. + // + // Case 2: Psiphon VPN is running + // + // In principle, we might want to set isVpnMode = true so that we obtain the + // physical network ID and load any existing tactics. However, as the VPN + // holds a lock on the data store, the load will fail; also no tactics request + // is attempted. + // + // Hypothetically, if a tactics request did proceed, the tunneled client GeoIP + // would not reflect the actual client location, and so it's safer to set + // isVpnMode = false to ensure fetched tactics are stored under a distinct + // Network ID ("VPN"). + // + // Case 3: another VPN is running + // + // Unlike case 2, there's no Psiphon VPN process holding the data store lock. 
+ // As with case 2, there's some merit to setting isVpnMode = true in order to + // load existing tactics, but since a tactics request may proceed, it's safer + // to set isVpnMode = false and store fetched tactics under a distinct + // Network ID ("VPN"). + + return PsiphonTunnel.getNetworkID(context, false); } @Override diff --git a/psiphon/feedback.go b/psiphon/feedback.go index d11b0bf6a..33dac5ac4 100644 --- a/psiphon/feedback.go +++ b/psiphon/feedback.go @@ -121,8 +121,23 @@ func SendFeedback(ctx context.Context, config *Config, diagnostics, uploadPath s p.Close() getTacticsCtx, cancelFunc := context.WithTimeout(ctx, timeout) defer cancelFunc() - // Note: GetTactics will fail silently if the datastore used for retrieving - // and storing tactics is opened by another process. + + // Limitation: GetTactics will fail silently if the datastore used for + // retrieving and storing tactics is opened by another process. This can + // be the case on Android an iOS where SendFeedback is invoked by the UI + // process while tunneling is run by the VPN process. + // + // - When the Psiphon VPN is running, GetTactics won't load tactics. + // However, tactics may be less critical since feedback will be + // tunneled. This outcome also avoids fetching tactics while tunneled, + // where otherwise the client GeoIP used for tactics would reflect the + // tunnel egress point. + // + // - When the Psiphon VPN is not running, this will load tactics, and + // potentially fetch tactics, with either the correct, untunneled GeoIP + // or a network ID of "VPN" if some other non-Psiphon VPN is running + // (the caller should ensure a network ID of "VPN" in this case). + GetTactics(getTacticsCtx, config) // Get the latest client parameters From a158d7ed3184e51968c6eea3c23fe19bd1b2007b Mon Sep 17 00:00:00 2001 From: Eugene Fryntov Date: Fri, 9 Dec 2022 16:41:42 -0500 Subject: [PATCH 40/44] Fix occasional timeout exception in the feedback code. Do not call `PsiphonTunnelFeedback.shutdownAndAwaitTermination()` from `finalize()`. If the system decides to sleep in the middle of GC run a timeout may occur when the system wakes up. Instead initialize the executor queues at the very beginning of `startSendFeedback()` and shut them down at the very end of `stopSendFeedback()`. --- .../Android/PsiphonTunnel/PsiphonTunnel.java | 28 ++++++++----------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index 88b091a71..d002a9ab5 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -340,21 +340,8 @@ public void writeRuntimeProfiles(String outputDirectory, int cpuSampleDurationSe // instances in parallel, or concurrently, will result in undefined behavior. public static class PsiphonTunnelFeedback { - final private ExecutorService workQueue; - final private ExecutorService callbackQueue; - - public PsiphonTunnelFeedback() { - workQueue = Executors.newSingleThreadExecutor(); - callbackQueue = Executors.newSingleThreadExecutor(); - } - - @Override - protected void finalize() throws Throwable { - // Ensure the queues are cleaned up. 
- shutdownAndAwaitTermination(callbackQueue); - shutdownAndAwaitTermination(workQueue); - super.finalize(); - } + private ExecutorService workQueue; + private ExecutorService callbackQueue; void shutdownAndAwaitTermination(ExecutorService pool) { try { @@ -398,6 +385,9 @@ void shutdownAndAwaitTermination(ExecutorService pool) { public void startSendFeedback(Context context, HostFeedbackHandler feedbackHandler, HostLogger logger, String feedbackConfigJson, String diagnosticsJson, String uploadPath, String clientPlatformPrefix, String clientPlatformSuffix) { + // initialize executor queues + workQueue = Executors.newSingleThreadExecutor(); + callbackQueue = Executors.newSingleThreadExecutor(); workQueue.submit(new Runnable() { @Override @@ -526,12 +516,18 @@ public void run() { // call is asynchronous and returns a future which is fulfilled when the underlying stop // operation completes. public Future stopSendFeedback() { - return workQueue.submit(new Runnable() { + Future result = workQueue.submit(new Runnable() { @Override public void run() { Psi.stopSendFeedback(); } }, null); + + // Also shutdown executor queues + shutdownAndAwaitTermination(workQueue); + shutdownAndAwaitTermination(callbackQueue); + + return result; } } From 1ff41b16b02179b7075e50714d78595e795084cf Mon Sep 17 00:00:00 2001 From: Eugene Fryntov Date: Fri, 9 Dec 2022 18:24:35 -0500 Subject: [PATCH 41/44] Prevent feedback code from crashing if the callback queue is already shutdown. --- .../Android/PsiphonTunnel/PsiphonTunnel.java | 61 +++++++++++-------- 1 file changed, 37 insertions(+), 24 deletions(-) diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index d002a9ab5..31ff2c901 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -66,6 +66,7 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -401,12 +402,15 @@ public void run() { new PsiphonProviderFeedbackHandler() { @Override public void sendFeedbackCompleted(java.lang.Exception e) { - callbackQueue.submit(new Runnable() { - @Override - public void run() { - feedbackHandler.sendFeedbackCompleted(e); - } - }); + try { + callbackQueue.submit(new Runnable() { + @Override + public void run() { + feedbackHandler.sendFeedbackCompleted(e); + } + }); + } catch (RejectedExecutionException ignored) { + } } }, new PsiphonProviderNetwork() { @@ -481,19 +485,25 @@ public void notice(String noticeJSON) { } String diagnosticMessage = noticeType + ": " + data.toString(); - callbackQueue.submit(new Runnable() { - @Override - public void run() { - logger.onDiagnosticMessage(diagnosticMessage); - } - }); + try { + callbackQueue.submit(new Runnable() { + @Override + public void run() { + logger.onDiagnosticMessage(diagnosticMessage); + } + }); + } catch (RejectedExecutionException ignored) { + } } catch (java.lang.Exception e) { - callbackQueue.submit(new Runnable() { - @Override - public void run() { - logger.onDiagnosticMessage("Error handling notice " + e.toString()); - } - }); + try { + callbackQueue.submit(new Runnable() { + @Override + public void run() { + logger.onDiagnosticMessage("Error handling notice " + e.toString()); + } + }); + } catch (RejectedExecutionException ignored) 
{ + } } } }, @@ -501,12 +511,15 @@ public void run() { true // Use hasIPv6Route on Android ); } catch (java.lang.Exception e) { - callbackQueue.submit(new Runnable() { - @Override - public void run() { - feedbackHandler.sendFeedbackCompleted(new Exception("Error sending feedback", e)); - } - }); + try { + callbackQueue.submit(new Runnable() { + @Override + public void run() { + feedbackHandler.sendFeedbackCompleted(new Exception("Error sending feedback", e)); + } + }); + } catch (RejectedExecutionException ignored) { + } } } }); From 14fa339cd03b21cf4f481306d102e258c70f436d Mon Sep 17 00:00:00 2001 From: Eugene Fryntov Date: Mon, 20 Mar 2023 11:52:15 -0400 Subject: [PATCH 42/44] Minor refactoring of the feedback sending code: - Change `stopSendFeedback` signature to void since we never inspect the result. - Change `ExecutorService.submit()` calls to `ExecutorService.execute()` for the same reason as above. --- .../Android/PsiphonTunnel/PsiphonTunnel.java | 27 +++++++------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index 31ff2c901..fb43fce12 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -390,7 +390,7 @@ public void startSendFeedback(Context context, HostFeedbackHandler feedbackHandl workQueue = Executors.newSingleThreadExecutor(); callbackQueue = Executors.newSingleThreadExecutor(); - workQueue.submit(new Runnable() { + workQueue.execute(new Runnable() { @Override public void run() { try { @@ -403,7 +403,7 @@ public void run() { @Override public void sendFeedbackCompleted(java.lang.Exception e) { try { - callbackQueue.submit(new Runnable() { + callbackQueue.execute(new Runnable() { @Override public void run() { feedbackHandler.sendFeedbackCompleted(e); @@ -486,7 +486,7 @@ public void notice(String noticeJSON) { String diagnosticMessage = noticeType + ": " + data.toString(); try { - callbackQueue.submit(new Runnable() { + callbackQueue.execute(new Runnable() { @Override public void run() { logger.onDiagnosticMessage(diagnosticMessage); @@ -496,7 +496,7 @@ public void run() { } } catch (java.lang.Exception e) { try { - callbackQueue.submit(new Runnable() { + callbackQueue.execute(new Runnable() { @Override public void run() { logger.onDiagnosticMessage("Error handling notice " + e.toString()); @@ -512,7 +512,7 @@ public void run() { ); } catch (java.lang.Exception e) { try { - callbackQueue.submit(new Runnable() { + callbackQueue.execute(new Runnable() { @Override public void run() { feedbackHandler.sendFeedbackCompleted(new Exception("Error sending feedback", e)); @@ -525,22 +525,18 @@ public void run() { }); } - // Interrupt an in-progress feedback upload operation started with startSendFeedback(). This - // call is asynchronous and returns a future which is fulfilled when the underlying stop - // operation completes. - public Future stopSendFeedback() { - Future result = workQueue.submit(new Runnable() { + // Interrupt an in-progress feedback upload operation started with startSendFeedback(). 
+ public void stopSendFeedback() { + workQueue.execute(new Runnable() { @Override public void run() { Psi.stopSendFeedback(); } - }, null); + }); // Also shutdown executor queues shutdownAndAwaitTermination(workQueue); shutdownAndAwaitTermination(callbackQueue); - - return result; } } @@ -802,9 +798,7 @@ private static String getNetworkID(Context context, boolean isVpnMode) { ConnectivityManager connectivityManager = (ConnectivityManager)context.getSystemService(Context.CONNECTIVITY_SERVICE); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) { - if (!isVpnMode) { - NetworkCapabilities capabilities = null; try { Network nw = connectivityManager.getActiveNetwork(); @@ -819,15 +813,12 @@ private static String getNetworkID(Context context, boolean isVpnMode) { if (capabilities != null && capabilities.hasTransport(NetworkCapabilities.TRANSPORT_VPN)) { return "VPN"; } - } - } NetworkInfo activeNetworkInfo = null; try { activeNetworkInfo = connectivityManager.getActiveNetworkInfo(); - } catch (java.lang.Exception e) { // May get exceptions due to missing permissions like android.permission.ACCESS_NETWORK_STATE. From b1a875a0f76ff37a332234c1536667531cb89aa4 Mon Sep 17 00:00:00 2001 From: Rod Hynes Date: Mon, 20 Mar 2023 12:00:52 -0400 Subject: [PATCH 43/44] Fix comment typos --- MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java | 2 +- psiphon/feedback.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index 88b091a71..41ba2b8d4 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -436,7 +436,7 @@ public String getNetworkID() { // Case 1: no VPN is running // // isVpnMode = true/false doesn't change the network ID; the network ID will - // be the physical network ID, and feedback may load load tactics and may + // be the physical network ID, and feedback may load existing tactics or may // fetch tactics. // // Case 2: Psiphon VPN is running diff --git a/psiphon/feedback.go b/psiphon/feedback.go index 33dac5ac4..da73c5881 100644 --- a/psiphon/feedback.go +++ b/psiphon/feedback.go @@ -124,7 +124,7 @@ func SendFeedback(ctx context.Context, config *Config, diagnostics, uploadPath s // Limitation: GetTactics will fail silently if the datastore used for // retrieving and storing tactics is opened by another process. This can - // be the case on Android an iOS where SendFeedback is invoked by the UI + // be the case on Android and iOS where SendFeedback is invoked by the UI // process while tunneling is run by the VPN process. // // - When the Psiphon VPN is running, GetTactics won't load tactics. From 80d027f8ff46a08c678a674fcf6e86bb8f7ac938 Mon Sep 17 00:00:00 2001 From: Eugene Fryntov Date: Mon, 20 Mar 2023 14:29:18 -0400 Subject: [PATCH 44/44] PsiphonTunnelFeedback: move worker queues creation back to constructor. Rename stopSendFeedback() and add a comment about instance reusability. 
--- .../Android/PsiphonTunnel/PsiphonTunnel.java | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index fb43fce12..35030a18a 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -340,9 +340,8 @@ public void writeRuntimeProfiles(String outputDirectory, int cpuSampleDurationSe // - Only a single instance of PsiphonTunnelFeedback should be used at a time. Using multiple // instances in parallel, or concurrently, will result in undefined behavior. public static class PsiphonTunnelFeedback { - - private ExecutorService workQueue; - private ExecutorService callbackQueue; + private final ExecutorService workQueue = Executors.newSingleThreadExecutor(); + private final ExecutorService callbackQueue = Executors.newSingleThreadExecutor(); void shutdownAndAwaitTermination(ExecutorService pool) { try { @@ -386,10 +385,6 @@ void shutdownAndAwaitTermination(ExecutorService pool) { public void startSendFeedback(Context context, HostFeedbackHandler feedbackHandler, HostLogger logger, String feedbackConfigJson, String diagnosticsJson, String uploadPath, String clientPlatformPrefix, String clientPlatformSuffix) { - // initialize executor queues - workQueue = Executors.newSingleThreadExecutor(); - callbackQueue = Executors.newSingleThreadExecutor(); - workQueue.execute(new Runnable() { @Override public void run() { @@ -525,8 +520,10 @@ public void run() { }); } - // Interrupt an in-progress feedback upload operation started with startSendFeedback(). - public void stopSendFeedback() { + // Interrupt an in-progress feedback upload operation started with startSendFeedback() and shutdown + // executor queues. + // NOTE: this instance cannot be reused after shutdown() has been called. + public void shutdown() { workQueue.execute(new Runnable() { @Override public void run() { @@ -534,7 +531,6 @@ public void run() { } }); - // Also shutdown executor queues shutdownAndAwaitTermination(workQueue); shutdownAndAwaitTermination(callbackQueue); }
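
Taken together, patches 41 and 42 converge on a small dispatch pattern for host callbacks: hand each callback to a dedicated single-thread executor with execute(), and wrap the call in a try/catch so that a callback arriving after the queue has already been shut down is silently dropped instead of crashing the caller with a RejectedExecutionException. The sketch below illustrates that pattern in isolation; the CallbackDispatcher class and dispatch() method are names invented for this example and do not appear in PsiphonTunnel.java.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.RejectedExecutionException;

    // Illustrative only: dispatches callbacks on a single-thread executor and
    // silently drops work submitted after the executor has been shut down,
    // mirroring the try/catch added around callbackQueue.execute() in patches 41/42.
    public class CallbackDispatcher {

        private final ExecutorService callbackQueue = Executors.newSingleThreadExecutor();

        // Runs the callback on the callback queue. If the queue has already been
        // shut down, the RejectedExecutionException is swallowed and the callback
        // is simply not delivered, matching the patched behavior.
        public void dispatch(Runnable callback) {
            try {
                callbackQueue.execute(callback);
            } catch (RejectedExecutionException ignored) {
                // Executor already shut down; drop the late callback.
            }
        }

        public void shutdown() {
            callbackQueue.shutdownNow();
        }
    }

Using execute() rather than submit() also follows patch 42's rationale: the returned Future was never inspected, so there is nothing to gain from the extra wrapping.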
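
Patch 44 then settles the executors' lifecycle: the two queues go back to being created once, as final fields, and stopSendFeedback() becomes shutdown(), after which the PsiphonTunnelFeedback instance cannot be reused. The sketch below models that lifecycle under one stated assumption: shutdownAndAwaitTermination() is shown following the standard java.util.concurrent.ExecutorService javadoc pattern (shutdown, await, shutdownNow), since its full body is not reproduced in these hunks and may differ in timeouts and logging; the FeedbackLifecycleSketch name and the Runnable parameters are likewise invented for illustration.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    // Illustrative only: models the executor lifecycle that patch 44 settles on.
    // Queues are created once as final fields, and shutdown() is terminal; the
    // instance cannot be reused afterwards, as the patch 44 comment notes.
    public class FeedbackLifecycleSketch {

        private final ExecutorService workQueue = Executors.newSingleThreadExecutor();
        private final ExecutorService callbackQueue = Executors.newSingleThreadExecutor();

        public void start(Runnable task) {
            // Work is queued with execute(); the result is never inspected.
            workQueue.execute(task);
        }

        // Terminal: queues the interrupt/cleanup task, then shuts both queues down.
        public void shutdown(Runnable stopTask) {
            workQueue.execute(stopTask);
            shutdownAndAwaitTermination(workQueue);
            shutdownAndAwaitTermination(callbackQueue);
        }

        // Assumed implementation, following the ExecutorService javadoc pattern;
        // the real method in PsiphonTunnel.java is not shown in full here.
        private void shutdownAndAwaitTermination(ExecutorService pool) {
            pool.shutdown(); // stop accepting new tasks
            try {
                if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                    pool.shutdownNow(); // cancel currently executing tasks
                    if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                        System.err.println("Pool did not terminate");
                    }
                }
            } catch (InterruptedException ie) {
                pool.shutdownNow();
                Thread.currentThread().interrupt();
            }
        }
    }

Queuing the stop task before shutting the work queue down is deliberate: ExecutorService.shutdown() still runs previously submitted tasks, so the interrupt gets a chance to execute while no new work can be enqueued.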