Update go.mod with new location of the Alizer library (#6962)

* Update 'go.mod' with new location of the Alizer library

* Downgrade and pin conflicting versions of 'github.com/docker/{cli,distributions}'

Pinning them to the versions we had before.
Otherwise, there were issues when running 'go mod tidy':

```
$ go mod tidy
go: downloading github.com/operator-framework/api v0.17.6
go: finding module for package github.com/docker/docker/pkg/term
github.com/redhat-developer/odo/pkg/auth imports
        github.com/openshift/oc/pkg/cli/login imports
                github.com/openshift/oc/pkg/helpers/term imports
                        github.com/docker/docker/pkg/term: module
                        github.com/docker/docker@latest found
                        (v24.0.4+incompatible), but does not contain
                        package github.com/docker/docker/pkg/term

```
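For reference, this kind of pinning is usually expressed through `require` (and, if necessary, `replace`) directives in `go.mod`. A minimal sketch, with placeholder versions rather than the exact ones used in this change:

```
// go.mod (sketch; the versions below are placeholders)
require (
	github.com/docker/cli v20.10.21+incompatible // pinned back to the previous major line
	github.com/docker/distribution v2.8.1+incompatible // pinned back to the previous release
)

// A replace directive keeps transitive requirements from re-upgrading
// github.com/docker/docker past a release that still ships pkg/term.
replace github.com/docker/docker => github.com/docker/docker v20.10.21+incompatible
```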

* Fix expected output in quickstart guides doc automation tests

Author: Armel Soro
Date: 2023-07-17 12:57:31 +02:00
Committed by: GitHub
Parent commit: abc808bdec
Commit: abdb1c568d
307 changed files with 11724 additions and 7158 deletions


@@ -10,8 +10,9 @@ import (
"bufio"
"bytes"
"encoding/base64"
"github.com/ProtonMail/go-crypto/openpgp/errors"
"io"
"github.com/ProtonMail/go-crypto/openpgp/errors"
)
// A Block represents an OpenPGP armored structure.
@@ -208,12 +209,16 @@ TryNextBlock:
break
}
i := bytes.Index(line, []byte(": "))
i := bytes.Index(line, []byte(":"))
if i == -1 {
goto TryNextBlock
}
lastKey = string(line[:i])
p.Header[lastKey] = string(line[i+2:])
var value string
if len(line) > i+2 {
value = string(line[i+2:])
}
p.Header[lastKey] = value
}
p.lReader.in = r


@@ -504,7 +504,7 @@ EachPacket:
// Else, ignoring the signature as it does not follow anything
// we would know to attach it to.
case *packet.PrivateKey:
if pkt.IsSubkey == false {
if !pkt.IsSubkey {
packets.Unread(p)
break EachPacket
}
@@ -513,7 +513,7 @@ EachPacket:
return nil, err
}
case *packet.PublicKey:
if pkt.IsSubkey == false {
if !pkt.IsSubkey {
packets.Unread(p)
break EachPacket
}


@@ -39,7 +39,7 @@ type Config struct {
// and password-encrypted data.
// If nil, the default configuration is used
S2KConfig *s2k.Config
// Iteration count for Iterated S2K (String to Key).
// Iteration count for Iterated S2K (String to Key).
// Only used if sk2.Mode is nil.
// This value is duplicated here from s2k.Config for backwards compatibility.
// It determines the strength of the passphrase stretching when
@@ -135,9 +135,9 @@ func (c *Config) Cipher() CipherFunction {
func (c *Config) Now() time.Time {
if c == nil || c.Time == nil {
return time.Now()
return time.Now().Truncate(time.Second)
}
return c.Time()
return c.Time().Truncate(time.Second)
}
// KeyLifetime returns the validity period of the key.
@@ -198,7 +198,7 @@ func (c *Config) S2K() *s2k.Config {
}
// for backwards compatibility
if c != nil && c.S2KCount > 0 && c.S2KConfig == nil {
return &s2k.Config {
return &s2k.Config{
S2KCount: c.S2KCount,
}
}


@@ -531,8 +531,8 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction Cip
return err
}
// encryptWithConfig encrypts an unencrypted private key using the passphrase and the config.
func (pk *PrivateKey) encryptWithConfig(passphrase []byte, config *Config) error {
// EncryptWithConfig encrypts an unencrypted private key using the passphrase and the config.
func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error {
params, err := s2k.Generate(config.Random(), config.S2K())
if err != nil {
return err
@@ -584,7 +584,7 @@ func (pk *PrivateKey) Encrypt(passphrase []byte) error {
} ,
DefaultCipher: CipherAES256,
}
return pk.encryptWithConfig(passphrase, config)
return pk.EncryptWithConfig(passphrase, config)
}
func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) {


@@ -415,6 +415,10 @@ func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) {
return
}
if len(pk.p.Bytes()) == 0 {
return errors.StructuralError("empty EdDSA public key")
}
pub := eddsa.NewPublicKey(c)
switch flag := pk.p.Bytes()[0]; flag {
@@ -596,7 +600,7 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
}
signed.Write(sig.HashSuffix)
hashBytes := signed.Sum(nil)
if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
if sig.Version == 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) {
return errors.SignatureError("hash tag doesn't match")
}


@@ -904,7 +904,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
if sig.IssuerKeyId != nil && sig.Version == 4 {
keyId := make([]byte, 8)
binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId})
subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
}
if sig.IssuerFingerprint != nil {
contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...)


@@ -192,13 +192,21 @@ func Generate(rand io.Reader, c *Config) (*Params, error) {
parallelism: argonConfig.Parallelism(),
memoryExp: argonConfig.EncodedMemory(),
}
} else {
// handle IteratedSaltedS2K case
} else if c != nil && c.PassphraseIsHighEntropy && c.Mode() == SaltedS2K { // Allow SaltedS2K if PassphraseIsHighEntropy
hashId, ok := algorithm.HashToHashId(c.hash())
if !ok {
return nil, errors.UnsupportedError("no such hash")
}
params = &Params{
mode: SaltedS2K,
hashId: hashId,
}
} else { // Enforce IteratedSaltedS2K method otherwise
hashId, ok := algorithm.HashToHashId(c.hash())
if !ok {
return nil, errors.UnsupportedError("no such hash")
}
// Enforce iterared + salted method if not Argon 2
if c != nil {
c.S2KMode = IteratedSaltedS2K
}


@@ -7,8 +7,9 @@ import "crypto"
// values.
type Config struct {
// S2K (String to Key) mode, used for key derivation in the context of secret key encryption
// and password-encrypted data. Either s2k.Argon2S2K or s2k.IteratedSaltedS2K has to be selected
// weaker options are not allowed.
// and passphrase-encrypted data. Either s2k.Argon2S2K or s2k.IteratedSaltedS2K may be used.
// If the passphrase is a high-entropy key, indicated by setting PassphraseIsHighEntropy to true,
// s2k.SaltedS2K can also be used.
// Note: Argon2 is the strongest option but not all OpenPGP implementations are compatible with it
//(pending standardisation).
// 0 (simple), 1(salted), 3(iterated), 4(argon2)
@@ -35,6 +36,13 @@ type Config struct {
// use a value that is at least 65536. See RFC 4880 Section
// 3.7.1.3.
S2KCount int
// Indicates whether the passphrase passed by the application is a
// high-entropy key (e.g. it's randomly generated or derived from
// another passphrase using a strong key derivation function).
// When true, allows the S2KMode to be s2k.SaltedS2K.
// When the passphrase is not a high-entropy key, using SaltedS2K is
// insecure, and not allowed by draft-ietf-openpgp-crypto-refresh-08.
PassphraseIsHighEntropy bool
}
// Argon2Config stores the Argon2 parameters
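As an aside, a minimal sketch of how an application could opt into Salted S2K under this change, assuming the `packet` and `s2k` packages shown in this diff (the package and function names below are illustrative):

```go
package example

import (
	"github.com/ProtonMail/go-crypto/openpgp/packet"
	"github.com/ProtonMail/go-crypto/openpgp/s2k"
)

// saltedS2KConfig requests Salted S2K for a high-entropy (e.g. randomly
// generated) key. s2k.Generate only honours SaltedS2K when
// PassphraseIsHighEntropy is true; otherwise it falls back to
// IteratedSaltedS2K, as enforced in the hunk above.
func saltedS2KConfig() *packet.Config {
	return &packet.Config{
		S2KConfig: &s2k.Config{
			S2KMode:                 s2k.SaltedS2K,
			PassphraseIsHighEntropy: true,
		},
	}
}
```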


@@ -381,7 +381,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En
}
sig := to[i].PrimaryIdentity().SelfSignature
if sig.SEIPDv2 == false {
if !sig.SEIPDv2 {
aeadSupported = false
}


@@ -22,11 +22,11 @@ func (k *Key) clamp(in *Key) *Key {
// isValidPubKey verifies if the public key is not a low-order point.
func (k *Key) isValidPubKey() bool {
fp.Modp((*fp.Elt)(k))
isLowOrder := false
var isLowOrder int
for _, P := range lowOrderPoints {
isLowOrder = isLowOrder || subtle.ConstantTimeCompare(P[:], k[:]) != 0
isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:])
}
return !isLowOrder
return isLowOrder == 0
}
// KeyGen obtains a public key given a secret key.


@@ -22,11 +22,11 @@ func (k *Key) clamp(in *Key) *Key {
// isValidPubKey verifies if the public key is not a low-order point.
func (k *Key) isValidPubKey() bool {
fp.Modp((*fp.Elt)(k))
isLowOrder := false
var isLowOrder int
for _, P := range lowOrderPoints {
isLowOrder = isLowOrder || subtle.ConstantTimeCompare(P[:], k[:]) != 0
isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:])
}
return !isLowOrder
return isLowOrder == 0
}
// KeyGen obtains a public key given a secret key.


@@ -2,13 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !amd64 || appengine || gccgo
// +build !amd64 appengine gccgo
package sha3
// KeccakF1600 applies the Keccak permutation to a 1600b-wide
// state represented as a slice of 25 uint64s.
// nolint:funlen
func KeccakF1600(a *[25]uint64) {
// Implementation translated from Keccak-inplace.c
// in the keccak reference code.


@@ -1,14 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 && !appengine && !gccgo
// +build amd64,!appengine,!gccgo
package sha3
// This function is implemented in keccakf_amd64.s.
//go:noescape
func KeccakF1600(state *[25]uint64)


@@ -1,390 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!appengine,!gccgo
// This code was translated into a form compatible with 6a from the public
// domain sources at https://github.com/gvanas/KeccakCodePackage
// Offsets in state
#define _ba (0*8)
#define _be (1*8)
#define _bi (2*8)
#define _bo (3*8)
#define _bu (4*8)
#define _ga (5*8)
#define _ge (6*8)
#define _gi (7*8)
#define _go (8*8)
#define _gu (9*8)
#define _ka (10*8)
#define _ke (11*8)
#define _ki (12*8)
#define _ko (13*8)
#define _ku (14*8)
#define _ma (15*8)
#define _me (16*8)
#define _mi (17*8)
#define _mo (18*8)
#define _mu (19*8)
#define _sa (20*8)
#define _se (21*8)
#define _si (22*8)
#define _so (23*8)
#define _su (24*8)
// Temporary registers
#define rT1 AX
// Round vars
#define rpState DI
#define rpStack SP
#define rDa BX
#define rDe CX
#define rDi DX
#define rDo R8
#define rDu R9
#define rBa R10
#define rBe R11
#define rBi R12
#define rBo R13
#define rBu R14
#define rCa SI
#define rCe BP
#define rCi rBi
#define rCo rBo
#define rCu R15
#define MOVQ_RBI_RCE MOVQ rBi, rCe
#define XORQ_RT1_RCA XORQ rT1, rCa
#define XORQ_RT1_RCE XORQ rT1, rCe
#define XORQ_RBA_RCU XORQ rBa, rCu
#define XORQ_RBE_RCU XORQ rBe, rCu
#define XORQ_RDU_RCU XORQ rDu, rCu
#define XORQ_RDA_RCA XORQ rDa, rCa
#define XORQ_RDE_RCE XORQ rDe, rCe
#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \
/* Prepare round */ \
MOVQ rCe, rDa; \
ROLQ $1, rDa; \
\
MOVQ _bi(iState), rCi; \
XORQ _gi(iState), rDi; \
XORQ rCu, rDa; \
XORQ _ki(iState), rCi; \
XORQ _mi(iState), rDi; \
XORQ rDi, rCi; \
\
MOVQ rCi, rDe; \
ROLQ $1, rDe; \
\
MOVQ _bo(iState), rCo; \
XORQ _go(iState), rDo; \
XORQ rCa, rDe; \
XORQ _ko(iState), rCo; \
XORQ _mo(iState), rDo; \
XORQ rDo, rCo; \
\
MOVQ rCo, rDi; \
ROLQ $1, rDi; \
\
MOVQ rCu, rDo; \
XORQ rCe, rDi; \
ROLQ $1, rDo; \
\
MOVQ rCa, rDu; \
XORQ rCi, rDo; \
ROLQ $1, rDu; \
\
/* Result b */ \
MOVQ _ba(iState), rBa; \
MOVQ _ge(iState), rBe; \
XORQ rCo, rDu; \
MOVQ _ki(iState), rBi; \
MOVQ _mo(iState), rBo; \
MOVQ _su(iState), rBu; \
XORQ rDe, rBe; \
ROLQ $44, rBe; \
XORQ rDi, rBi; \
XORQ rDa, rBa; \
ROLQ $43, rBi; \
\
MOVQ rBe, rCa; \
MOVQ rc, rT1; \
ORQ rBi, rCa; \
XORQ rBa, rT1; \
XORQ rT1, rCa; \
MOVQ rCa, _ba(oState); \
\
XORQ rDu, rBu; \
ROLQ $14, rBu; \
MOVQ rBa, rCu; \
ANDQ rBe, rCu; \
XORQ rBu, rCu; \
MOVQ rCu, _bu(oState); \
\
XORQ rDo, rBo; \
ROLQ $21, rBo; \
MOVQ rBo, rT1; \
ANDQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _bi(oState); \
\
NOTQ rBi; \
ORQ rBa, rBu; \
ORQ rBo, rBi; \
XORQ rBo, rBu; \
XORQ rBe, rBi; \
MOVQ rBu, _bo(oState); \
MOVQ rBi, _be(oState); \
B_RBI_RCE; \
\
/* Result g */ \
MOVQ _gu(iState), rBe; \
XORQ rDu, rBe; \
MOVQ _ka(iState), rBi; \
ROLQ $20, rBe; \
XORQ rDa, rBi; \
ROLQ $3, rBi; \
MOVQ _bo(iState), rBa; \
MOVQ rBe, rT1; \
ORQ rBi, rT1; \
XORQ rDo, rBa; \
MOVQ _me(iState), rBo; \
MOVQ _si(iState), rBu; \
ROLQ $28, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ga(oState); \
G_RT1_RCA; \
\
XORQ rDe, rBo; \
ROLQ $45, rBo; \
MOVQ rBi, rT1; \
ANDQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _ge(oState); \
G_RT1_RCE; \
\
XORQ rDi, rBu; \
ROLQ $61, rBu; \
MOVQ rBu, rT1; \
ORQ rBa, rT1; \
XORQ rBo, rT1; \
MOVQ rT1, _go(oState); \
\
ANDQ rBe, rBa; \
XORQ rBu, rBa; \
MOVQ rBa, _gu(oState); \
NOTQ rBu; \
G_RBA_RCU; \
\
ORQ rBu, rBo; \
XORQ rBi, rBo; \
MOVQ rBo, _gi(oState); \
\
/* Result k */ \
MOVQ _be(iState), rBa; \
MOVQ _gi(iState), rBe; \
MOVQ _ko(iState), rBi; \
MOVQ _mu(iState), rBo; \
MOVQ _sa(iState), rBu; \
XORQ rDi, rBe; \
ROLQ $6, rBe; \
XORQ rDo, rBi; \
ROLQ $25, rBi; \
MOVQ rBe, rT1; \
ORQ rBi, rT1; \
XORQ rDe, rBa; \
ROLQ $1, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ka(oState); \
K_RT1_RCA; \
\
XORQ rDu, rBo; \
ROLQ $8, rBo; \
MOVQ rBi, rT1; \
ANDQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _ke(oState); \
K_RT1_RCE; \
\
XORQ rDa, rBu; \
ROLQ $18, rBu; \
NOTQ rBo; \
MOVQ rBo, rT1; \
ANDQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _ki(oState); \
\
MOVQ rBu, rT1; \
ORQ rBa, rT1; \
XORQ rBo, rT1; \
MOVQ rT1, _ko(oState); \
\
ANDQ rBe, rBa; \
XORQ rBu, rBa; \
MOVQ rBa, _ku(oState); \
K_RBA_RCU; \
\
/* Result m */ \
MOVQ _ga(iState), rBe; \
XORQ rDa, rBe; \
MOVQ _ke(iState), rBi; \
ROLQ $36, rBe; \
XORQ rDe, rBi; \
MOVQ _bu(iState), rBa; \
ROLQ $10, rBi; \
MOVQ rBe, rT1; \
MOVQ _mi(iState), rBo; \
ANDQ rBi, rT1; \
XORQ rDu, rBa; \
MOVQ _so(iState), rBu; \
ROLQ $27, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ma(oState); \
M_RT1_RCA; \
\
XORQ rDi, rBo; \
ROLQ $15, rBo; \
MOVQ rBi, rT1; \
ORQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _me(oState); \
M_RT1_RCE; \
\
XORQ rDo, rBu; \
ROLQ $56, rBu; \
NOTQ rBo; \
MOVQ rBo, rT1; \
ORQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _mi(oState); \
\
ORQ rBa, rBe; \
XORQ rBu, rBe; \
MOVQ rBe, _mu(oState); \
\
ANDQ rBa, rBu; \
XORQ rBo, rBu; \
MOVQ rBu, _mo(oState); \
M_RBE_RCU; \
\
/* Result s */ \
MOVQ _bi(iState), rBa; \
MOVQ _go(iState), rBe; \
MOVQ _ku(iState), rBi; \
XORQ rDi, rBa; \
MOVQ _ma(iState), rBo; \
ROLQ $62, rBa; \
XORQ rDo, rBe; \
MOVQ _se(iState), rBu; \
ROLQ $55, rBe; \
\
XORQ rDu, rBi; \
MOVQ rBa, rDu; \
XORQ rDe, rBu; \
ROLQ $2, rBu; \
ANDQ rBe, rDu; \
XORQ rBu, rDu; \
MOVQ rDu, _su(oState); \
\
ROLQ $39, rBi; \
S_RDU_RCU; \
NOTQ rBe; \
XORQ rDa, rBo; \
MOVQ rBe, rDa; \
ANDQ rBi, rDa; \
XORQ rBa, rDa; \
MOVQ rDa, _sa(oState); \
S_RDA_RCA; \
\
ROLQ $41, rBo; \
MOVQ rBi, rDe; \
ORQ rBo, rDe; \
XORQ rBe, rDe; \
MOVQ rDe, _se(oState); \
S_RDE_RCE; \
\
MOVQ rBo, rDi; \
MOVQ rBu, rDo; \
ANDQ rBu, rDi; \
ORQ rBa, rDo; \
XORQ rBi, rDi; \
XORQ rBo, rDo; \
MOVQ rDi, _si(oState); \
MOVQ rDo, _so(oState) \
// func KeccakF1600(state *[25]uint64)
TEXT ·KeccakF1600(SB), 0, $200-8
MOVQ state+0(FP), rpState
// Convert the user state into an internal state
NOTQ _be(rpState)
NOTQ _bi(rpState)
NOTQ _go(rpState)
NOTQ _ki(rpState)
NOTQ _mi(rpState)
NOTQ _sa(rpState)
// Execute the KeccakF permutation
MOVQ _ba(rpState), rCa
MOVQ _be(rpState), rCe
MOVQ _bu(rpState), rCu
XORQ _ga(rpState), rCa
XORQ _ge(rpState), rCe
XORQ _gu(rpState), rCu
XORQ _ka(rpState), rCa
XORQ _ke(rpState), rCe
XORQ _ku(rpState), rCu
XORQ _ma(rpState), rCa
XORQ _me(rpState), rCe
XORQ _mu(rpState), rCu
XORQ _sa(rpState), rCa
XORQ _se(rpState), rCe
MOVQ _si(rpState), rDi
MOVQ _so(rpState), rDo
XORQ _su(rpState), rCu
mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP)
// Revert the internal state to the user state
NOTQ _be(rpState)
NOTQ _bi(rpState)
NOTQ _go(rpState)
NOTQ _ki(rpState)
NOTQ _mi(rpState)
NOTQ _sa(rpState)
RET


@@ -99,6 +99,7 @@
// Uses: AX, DX, R8-R15, FLAGS
// Instr: x86_64, bmi2, adx
#define integerMulAdx(z,x,y) \
MOVL $0,R15; \
MOVQ 0+y, DX; XORL AX, AX; \
MULXQ 0+x, AX, R8; MOVQ AX, 0+z; \
MULXQ 8+x, AX, R9; ADCXQ AX, R8; \


@@ -158,6 +158,7 @@
// Uses: AX, DX, R8-R15, FLAGS
// Instr: x86_64, bmi2, adx
#define integerMulAdx(z,x,y) \
MOVL $0,R15; \
MOVQ 0+y, DX; XORL AX, AX; MOVQ $0, R8; \
MULXQ 0+x, AX, R9; MOVQ AX, 0+z; \
MULXQ 8+x, AX, R10; ADCXQ AX, R9; \


@@ -26,10 +26,16 @@ import (
"time"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// maxResets is the no.of times the Copy() method can tolerate a reset of the body
const maxResets = 5
var ErrReset = errors.New("writer has been reset")
var bufPool = sync.Pool{
New: func() interface{} {
buffer := make([]byte, 1<<20)
@@ -80,7 +86,7 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc o
return fmt.Errorf("failed to open writer: %w", err)
}
return nil // all ready present
return nil // already present
}
defer cw.Close()
@@ -131,35 +137,63 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er
// the size or digest is unknown, these values may be empty.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
ws, err := cw.Status()
if err != nil {
return fmt.Errorf("failed to get status: %w", err)
}
r := or
if ws.Offset > 0 {
r, err = seekReader(r, ws.Offset, size)
r, err = seekReader(or, ws.Offset, size)
if err != nil {
return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
}
}
copied, err := copyWithBuffer(cw, r)
if err != nil {
return fmt.Errorf("failed to copy: %w", err)
}
if size != 0 && copied < size-ws.Offset {
// Short writes would return its own error, this indicates a read failure
return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
}
if err := cw.Commit(ctx, size, expected, opts...); err != nil {
if !errdefs.IsAlreadyExists(err) {
return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
for i := 0; i < maxResets; i++ {
if i >= 1 {
log.G(ctx).WithField("digest", expected).Debugf("retrying copy due to reset")
}
copied, err := copyWithBuffer(cw, r)
if errors.Is(err, ErrReset) {
ws, err := cw.Status()
if err != nil {
return fmt.Errorf("failed to get status: %w", err)
}
r, err = seekReader(or, ws.Offset, size)
if err != nil {
return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
}
continue
}
if err != nil {
return fmt.Errorf("failed to copy: %w", err)
}
if size != 0 && copied < size-ws.Offset {
// Short writes would return its own error, this indicates a read failure
return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
}
if err := cw.Commit(ctx, size, expected, opts...); err != nil {
if errors.Is(err, ErrReset) {
ws, err := cw.Status()
if err != nil {
return fmt.Errorf("failed to get status: %w", err)
}
r, err = seekReader(or, ws.Offset, size)
if err != nil {
return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
}
continue
}
if !errdefs.IsAlreadyExists(err) {
return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
}
}
return nil
}
return nil
log.G(ctx).WithField("digest", expected).Errorf("failed to copy after %d retries", maxResets)
return fmt.Errorf("failed to copy after %d retries", maxResets)
}
// CopyReaderAt copies to a writer from a given reader at for the given


@@ -34,7 +34,7 @@ import (
"github.com/containerd/containerd/log"
"github.com/sirupsen/logrus"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -505,6 +505,7 @@ func (s *store) resumeStatus(ref string, total int64, digester digest.Digester)
return status, fmt.Errorf("provided total differs from status: %v != %v", total, status.Total)
}
//nolint:dupword
// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
fp, err := os.Open(data)
if err != nil {
@@ -643,7 +644,6 @@ func (s *store) ingestRoot(ref string) string {
// - root: entire ingest directory
// - ref: name of the starting ref, must be unique
// - data: file where data is written
//
func (s *store) ingestPaths(ref string) (string, string, string) {
var (
fp = s.ingestRoot(ref)


@@ -65,7 +65,6 @@
// ```
// name==foo,labels.bar
// ```
//
package filters
import (


@@ -45,7 +45,6 @@ field := quoted | [A-Za-z] [A-Za-z0-9_]+
operator := "==" | "!=" | "~="
value := quoted | [^\s,]+
quoted := <go string syntax>
*/
func Parse(s string) (Filter, error) {
// special case empty to match all


@@ -31,10 +31,10 @@ var errQuoteSyntax = errors.New("quote syntax error")
// or character literal represented by the string s.
// It returns four values:
//
// 1) value, the decoded Unicode code point or byte value;
// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
// 3) tail, the remainder of the string after the character; and
// 4) an error that will be nil if the character is syntactically valid.
// 1. value, the decoded Unicode code point or byte value;
// 2. multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
// 3. tail, the remainder of the string after the character; and
// 4. an error that will be nil if the character is syntactically valid.
//
// The second argument, quote, specifies the type of literal being parsed
// and therefore which escaped quote character is permitted.


@@ -46,10 +46,14 @@ type matchComparer struct {
// Match matches platform with the same windows major, minor
// and build version.
func (m matchComparer) Match(p imagespec.Platform) bool {
if m.defaults.Match(p) {
// TODO(windows): Figure out whether OSVersion is deprecated.
return strings.HasPrefix(p.OSVersion, m.osVersionPrefix)
func (m matchComparer) Match(p specs.Platform) bool {
match := m.defaults.Match(p)
if match && p.OS == "windows" {
if strings.HasPrefix(p.OSVersion, m.osVersionPrefix) {
return true
}
return p.OSVersion == ""
}
return false
}


@@ -27,40 +27,40 @@
// The vast majority of use cases should simply use the match function with
// user input. The first step is to parse a specifier into a matcher:
//
// m, err := Parse("linux")
// if err != nil { ... }
// m, err := Parse("linux")
// if err != nil { ... }
//
// Once you have a matcher, use it to match against the platform declared by a
// component, typically from an image or runtime. Since extracting an images
// platform is a little more involved, we'll use an example against the
// platform default:
//
// if ok := m.Match(Default()); !ok { /* doesn't match */ }
// if ok := m.Match(Default()); !ok { /* doesn't match */ }
//
// This can be composed in loops for resolving runtimes or used as a filter for
// fetch and select images.
//
// More details of the specifier syntax and platform spec follow.
//
// Declaring Platform Support
// # Declaring Platform Support
//
// Components that have strict platform requirements should use the OCI
// platform specification to declare their support. Typically, this will be
// images and runtimes that should make these declaring which platform they
// support specifically. This looks roughly as follows:
//
// type Platform struct {
// Architecture string
// OS string
// Variant string
// }
// type Platform struct {
// Architecture string
// OS string
// Variant string
// }
//
// Most images and runtimes should at least set Architecture and OS, according
// to their GOARCH and GOOS values, respectively (follow the OCI image
// specification when in doubt). ARM should set variant under certain
// discussions, which are outlined below.
//
// Platform Specifiers
// # Platform Specifiers
//
// While the OCI platform specifications provide a tool for components to
// specify structured information, user input typically doesn't need the full
@@ -77,7 +77,7 @@
// where the architecture may be known but a runtime may support images from
// different operating systems.
//
// Normalization
// # Normalization
//
// Because not all users are familiar with the way the Go runtime represents
// platforms, several normalizations have been provided to make this package
@@ -85,17 +85,17 @@
//
// The following are performed for architectures:
//
// Value Normalized
// aarch64 arm64
// armhf arm
// armel arm/v6
// i386 386
// x86_64 amd64
// x86-64 amd64
// Value Normalized
// aarch64 arm64
// armhf arm
// armel arm/v6
// i386 386
// x86_64 amd64
// x86-64 amd64
//
// We also normalize the operating system `macos` to `darwin`.
//
// ARM Support
// # ARM Support
//
// To qualify ARM architecture, the Variant field is used to qualify the arm
// version. The most common arm version, v7, is represented without the variant


@@ -134,9 +134,6 @@ func parseValueAndParams(header string) (value string, params map[string]string)
}
var pvalue string
pvalue, s = expectTokenOrQuoted(s[1:])
if pvalue == "" {
return
}
pkey = strings.ToLower(pkey)
params[pkey] = pvalue
s = skipSpace(s)


@@ -24,6 +24,7 @@ import (
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/containerd/containerd/content"
@@ -261,27 +262,20 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
// TODO: Support chunked upload
pr, pw := io.Pipe()
respC := make(chan response, 1)
body := io.NopCloser(pr)
pushw := newPushWriter(p.dockerBase, ref, desc.Digest, p.tracker, isManifest)
req.body = func() (io.ReadCloser, error) {
if body == nil {
return nil, errors.New("cannot reuse body, request must be retried")
}
// Only use the body once since pipe cannot be seeked
ob := body
body = nil
return ob, nil
pr, pw := io.Pipe()
pushw.setPipe(pw)
return io.NopCloser(pr), nil
}
req.size = desc.Size
go func() {
defer close(respC)
resp, err := req.doWithRetries(ctx, nil)
if err != nil {
respC <- response{err: err}
pr.CloseWithError(err)
pushw.setError(err)
pushw.Close()
return
}
@@ -290,20 +284,13 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
default:
err := remoteserrors.NewUnexpectedStatusErr(resp)
log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
pr.CloseWithError(err)
pushw.setError(err)
pushw.Close()
}
respC <- response{Response: resp}
pushw.setResponse(resp)
}()
return &pushWriter{
base: p.dockerBase,
ref: ref,
pipe: pw,
responseC: respC,
isManifest: isManifest,
expected: desc.Digest,
tracker: p.tracker,
}, nil
return pushw, nil
}
func getManifestPath(object string, dgst digest.Digest) []string {
@@ -325,29 +312,89 @@ func getManifestPath(object string, dgst digest.Digest) []string {
return []string{"manifests", object}
}
type response struct {
*http.Response
err error
}
type pushWriter struct {
base *dockerBase
ref string
pipe *io.PipeWriter
responseC <-chan response
pipe *io.PipeWriter
pipeC chan *io.PipeWriter
respC chan *http.Response
closeOnce sync.Once
errC chan error
isManifest bool
expected digest.Digest
tracker StatusTracker
}
func newPushWriter(db *dockerBase, ref string, expected digest.Digest, tracker StatusTracker, isManifest bool) *pushWriter {
// Initialize and create response
return &pushWriter{
base: db,
ref: ref,
expected: expected,
tracker: tracker,
pipeC: make(chan *io.PipeWriter, 1),
respC: make(chan *http.Response, 1),
errC: make(chan error, 1),
isManifest: isManifest,
}
}
func (pw *pushWriter) setPipe(p *io.PipeWriter) {
pw.pipeC <- p
}
func (pw *pushWriter) setError(err error) {
pw.errC <- err
}
func (pw *pushWriter) setResponse(resp *http.Response) {
pw.respC <- resp
}
func (pw *pushWriter) Write(p []byte) (n int, err error) {
status, err := pw.tracker.GetStatus(pw.ref)
if err != nil {
return n, err
}
if pw.pipe == nil {
p, ok := <-pw.pipeC
if !ok {
return 0, io.ErrClosedPipe
}
pw.pipe = p
} else {
select {
case p, ok := <-pw.pipeC:
if !ok {
return 0, io.ErrClosedPipe
}
pw.pipe.CloseWithError(content.ErrReset)
pw.pipe = p
// If content has already been written, the bytes
// cannot be written and the caller must reset
status.Offset = 0
status.UpdatedAt = time.Now()
pw.tracker.SetStatus(pw.ref, status)
return 0, content.ErrReset
default:
}
}
n, err = pw.pipe.Write(p)
if errors.Is(err, io.ErrClosedPipe) {
// if the pipe is closed, we might have the original error on the error
// channel - so we should try and get it
select {
case err2 := <-pw.errC:
err = err2
default:
}
}
status.Offset += int64(n)
status.UpdatedAt = time.Now()
pw.tracker.SetStatus(pw.ref, status)
@@ -355,13 +402,21 @@ func (pw *pushWriter) Write(p []byte) (n int, err error) {
}
func (pw *pushWriter) Close() error {
status, err := pw.tracker.GetStatus(pw.ref)
if err == nil && !status.Committed {
// Closing an incomplete writer. Record this as an error so that following write can retry it.
status.ErrClosed = errors.New("closed incomplete writer")
pw.tracker.SetStatus(pw.ref, status)
// Ensure pipeC is closed but handle `Close()` being
// called multiple times without panicking
pw.closeOnce.Do(func() {
close(pw.pipeC)
})
if pw.pipe != nil {
status, err := pw.tracker.GetStatus(pw.ref)
if err == nil && !status.Committed {
// Closing an incomplete writer. Record this as an error so that following write can retry it.
status.ErrClosed = errors.New("closed incomplete writer")
pw.tracker.SetStatus(pw.ref, status)
}
return pw.pipe.Close()
}
return pw.pipe.Close()
return nil
}
func (pw *pushWriter) Status() (content.Status, error) {
@@ -380,7 +435,7 @@ func (pw *pushWriter) Digest() digest.Digest {
func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
// Check whether read has already thrown an error
if _, err := pw.pipe.Write([]byte{}); err != nil && err != io.ErrClosedPipe {
if _, err := pw.pipe.Write([]byte{}); err != nil && !errors.Is(err, io.ErrClosedPipe) {
return fmt.Errorf("pipe error before commit: %w", err)
}
@@ -388,18 +443,40 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di
return err
}
// TODO: timeout waiting for response
resp := <-pw.responseC
if resp.err != nil {
return resp.err
var resp *http.Response
select {
case err := <-pw.errC:
return err
case resp = <-pw.respC:
defer resp.Body.Close()
case p, ok := <-pw.pipeC:
// check whether the pipe has changed in the commit, because sometimes Write
// can complete successfully, but the pipe may have changed. In that case, the
// content needs to be reset.
if !ok {
return io.ErrClosedPipe
}
pw.pipe.CloseWithError(content.ErrReset)
pw.pipe = p
// If content has already been written, the bytes
// cannot be written again and the caller must reset
status, err := pw.tracker.GetStatus(pw.ref)
if err != nil {
return err
}
status.Offset = 0
status.UpdatedAt = time.Now()
pw.tracker.SetStatus(pw.ref, status)
return content.ErrReset
}
defer resp.Response.Body.Close()
// 201 is specified return status, some registries return
// 200, 202 or 204.
switch resp.StatusCode {
case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted:
default:
return remoteserrors.NewUnexpectedStatusErr(resp.Response)
return remoteserrors.NewUnexpectedStatusErr(resp)
}
status, err := pw.tracker.GetStatus(pw.ref)


@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"path"
@@ -667,3 +668,17 @@ func responseFields(resp *http.Response) logrus.Fields {
return logrus.Fields(fields)
}
// IsLocalhost checks if the registry host is local.
func IsLocalhost(host string) bool {
if h, _, err := net.SplitHostPort(host); err == nil {
host = h
}
if host == "localhost" {
return true
}
ip := net.ParseIP(host)
return ip.IsLoopback()
}


@@ -257,8 +257,8 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st
// An example of this kind of content would be a Windows base layer, which is not supposed to be redistributed.
//
// This is based on the media type of the content:
// - application/vnd.oci.image.layer.nondistributable
// - application/vnd.docker.image.rootfs.foreign
// - application/vnd.oci.image.layer.nondistributable
// - application/vnd.docker.image.rootfs.foreign
func SkipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc {
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
if images.IsNonDistributable(desc.MediaType) {


@@ -23,7 +23,7 @@ var (
Package = "github.com/containerd/containerd"
// Version holds the complete version number. Filled in at linking time.
Version = "1.6.2+unknown"
Version = "1.6.20+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.


@@ -21,3 +21,5 @@ _testmain.go
*.exe
*.test
coverage.txt


@@ -3,9 +3,11 @@ wincred
Go wrapper around the Windows Credential Manager API functions.
![Go](https://github.com/danieljoos/wincred/workflows/Go/badge.svg)
[![GoDoc](https://godoc.org/github.com/danieljoos/wincred?status.svg)](https://godoc.org/github.com/danieljoos/wincred)
[![GitHub release](https://img.shields.io/github/release/danieljoos/wincred.svg?style=flat-square)](https://github.com/danieljoos/wincred/releases/latest)
[![Test Status](https://img.shields.io/github/workflow/status/danieljoos/wincred/test?label=test&logo=github&style=flat-square)](https://github.com/danieljoos/wincred/actions?query=workflow%3Atest)
[![Go Report Card](https://goreportcard.com/badge/github.com/danieljoos/wincred)](https://goreportcard.com/report/github.com/danieljoos/wincred)
[![Codecov](https://img.shields.io/codecov/c/github/danieljoos/wincred?logo=codecov&style=flat-square)](https://codecov.io/gh/danieljoos/wincred)
[![PkgGoDev](https://img.shields.io/badge/go.dev-docs-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/danieljoos/wincred)
Installation
------------
@@ -96,3 +98,48 @@ func main() {
}
}
```
Hints
-----
### Encoding
The credential objects simply store byte arrays without specific meaning or encoding.
For sharing between different applications, it might make sense to apply an explicit string encoding - for example **UTF-16 LE** (used nearly everywhere in the Win32 API).
```Go
package main
import (
"fmt"
"os"
"github.com/danieljoos/wincred"
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/transform"
)
func main() {
cred := wincred.NewGenericCredential("myGoApplication")
encoder := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM).NewEncoder()
blob, _, err := transform.Bytes(encoder, []byte("mysecret"))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
cred.CredentialBlob = blob
err = cred.Write()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
```
### Limitations
The size of a credential blob is limited to **2560 Bytes** by the Windows API.


@@ -5,30 +5,11 @@ package wincred
import (
"encoding/binary"
"reflect"
"syscall"
"time"
"unicode/utf16"
"unsafe"
)
// uf16PtrToString creates a Go string from a pointer to a UTF16 encoded zero-terminated string.
// Such pointers are returned from the Windows API calls.
// The function creates a copy of the string.
func utf16PtrToString(wstr *uint16) string {
if wstr != nil {
for len := 0; ; len++ {
ptr := unsafe.Pointer(uintptr(unsafe.Pointer(wstr)) + uintptr(len)*unsafe.Sizeof(*wstr)) // see https://golang.org/pkg/unsafe/#Pointer (3)
if *(*uint16)(ptr) == 0 {
return string(utf16.Decode(*(*[]uint16)(unsafe.Pointer(&reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(wstr)),
Len: len,
Cap: len,
}))))
}
}
}
return ""
}
syscall "golang.org/x/sys/windows"
)
// utf16ToByte creates a byte array from a given UTF 16 char array.
func utf16ToByte(wstr []uint16) (result []byte) {
@@ -41,7 +22,11 @@ func utf16ToByte(wstr []uint16) (result []byte) {
// utf16FromString creates a UTF16 char array from a string.
func utf16FromString(str string) []uint16 {
return syscall.StringToUTF16(str)
res, err := syscall.UTF16FromString(str)
if err != nil {
return []uint16{}
}
return res
}
// goBytes copies the given C byte array to a Go byte array (see `C.GoBytes`).
@@ -65,10 +50,10 @@ func sysToCredential(cred *sysCREDENTIAL) (result *Credential) {
return nil
}
result = new(Credential)
result.Comment = utf16PtrToString(cred.Comment)
result.TargetName = utf16PtrToString(cred.TargetName)
result.TargetAlias = utf16PtrToString(cred.TargetAlias)
result.UserName = utf16PtrToString(cred.UserName)
result.Comment = syscall.UTF16PtrToString(cred.Comment)
result.TargetName = syscall.UTF16PtrToString(cred.TargetName)
result.TargetAlias = syscall.UTF16PtrToString(cred.TargetAlias)
result.UserName = syscall.UTF16PtrToString(cred.UserName)
result.LastWritten = time.Unix(0, cred.LastWritten.Nanoseconds())
result.Persist = CredentialPersistence(cred.Persist)
result.CredentialBlob = goBytes(cred.CredentialBlob, cred.CredentialBlobSize)
@@ -80,7 +65,7 @@ func sysToCredential(cred *sysCREDENTIAL) (result *Credential) {
}))
for i, attr := range attrSlice {
resultAttr := &result.Attributes[i]
resultAttr.Keyword = utf16PtrToString(attr.Keyword)
resultAttr.Keyword = syscall.UTF16PtrToString(attr.Keyword)
resultAttr.Value = goBytes(attr.Value, attr.ValueSize)
}
return result


@@ -4,13 +4,13 @@ package wincred
import (
"reflect"
"syscall"
"unsafe"
syscall "golang.org/x/sys/windows"
)
var (
modadvapi32 = syscall.NewLazyDLL("advapi32.dll")
modadvapi32 = syscall.NewLazyDLL("advapi32.dll")
procCredRead proc = modadvapi32.NewProc("CredReadW")
procCredWrite proc = modadvapi32.NewProc("CredWriteW")
procCredDelete proc = modadvapi32.NewProc("CredDeleteW")


@@ -23,7 +23,7 @@ const (
func GetGenericCredential(targetName string) (*GenericCredential, error) {
cred, err := sysCredRead(targetName, sysCRED_TYPE_GENERIC)
if cred != nil {
return &GenericCredential{*cred}, err
return &GenericCredential{Credential: *cred}, err
}
return nil, err
}
@@ -55,7 +55,7 @@ func (t *GenericCredential) Delete() (err error) {
func GetDomainPassword(targetName string) (*DomainPassword, error) {
cred, err := sysCredRead(targetName, sysCRED_TYPE_DOMAIN_PASSWORD)
if cred != nil {
return &DomainPassword{*cred}, err
return &DomainPassword{Credential: *cred}, err
}
return nil, err
}


@@ -8,14 +8,15 @@
* Contributors:
* Red Hat, Inc.
******************************************************************************/
package enricher
import (
"context"
framework "github.com/redhat-developer/alizer/go/pkg/apis/enricher/framework/dotnet"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
utils "github.com/redhat-developer/alizer/go/pkg/utils"
framework "github.com/devfile/alizer/pkg/apis/enricher/framework/dotnet"
"github.com/devfile/alizer/pkg/apis/model"
utils "github.com/devfile/alizer/pkg/utils"
)
type DotNetEnricher struct{}


@@ -27,10 +27,10 @@ import (
"strconv"
"strings"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"github.com/devfile/alizer/pkg/utils/langfiles"
"github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/redhat-developer/alizer/go/pkg/utils/langfiles"
"gopkg.in/yaml.v3"
)


@@ -20,9 +20,9 @@ import (
"path/filepath"
"strings"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/schema"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/schema"
"github.com/devfile/alizer/pkg/utils"
)
type DotNetDetector struct{}


@@ -15,8 +15,8 @@ import (
"context"
"regexp"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"golang.org/x/mod/modfile"
)


@@ -15,8 +15,8 @@ import (
"context"
"regexp"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"golang.org/x/mod/modfile"
)


@@ -15,8 +15,8 @@ import (
"context"
"regexp"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"golang.org/x/mod/modfile"
)


@@ -15,8 +15,8 @@ import (
"context"
"regexp"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"golang.org/x/mod/modfile"
)


@@ -18,8 +18,8 @@ import (
"regexp"
"strings"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"golang.org/x/mod/modfile"
)
@@ -98,6 +98,13 @@ func GetPortWithMatchIndexesGo(content string, matchIndexes []int, toBeReplaced
portPlaceholder := content[matchIndexes[0]:matchIndexes[1]]
// we should end up with something like ".ListenAndServe(PORT"
portPlaceholder = strings.Replace(portPlaceholder, toBeReplaced, "", -1)
// try to replace any string quotes
portPlaceholder = strings.Replace(portPlaceholder, "\"", "", -1)
// check if the placeholder is an IP:PORT
splitedPlaceholder := strings.Split(portPlaceholder, ":")
if len(splitedPlaceholder) > 1 {
portPlaceholder = splitedPlaceholder[len(splitedPlaceholder)-1]
}
// if we are lucky enough portPlaceholder contains a real HOST:PORT otherwise it is a variable/expression
re, err := regexp.Compile(`:*(\d+)`)
if err != nil {


@@ -15,8 +15,8 @@ import (
"context"
"regexp"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"golang.org/x/mod/modfile"
)


@@ -15,8 +15,8 @@ import (
"context"
"regexp"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"golang.org/x/mod/modfile"
)


@@ -0,0 +1,65 @@
/*******************************************************************************
* Copyright (c) 2021 Red Hat, Inc.
* Distributed under license by Red Hat, Inc. All rights reserved.
* This program is made available under the terms of the
* Eclipse Public License v2.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v20.html
*
* Contributors:
* Red Hat, Inc.
******************************************************************************/
package enricher
import (
"regexp"
"strings"
"github.com/devfile/alizer/pkg/utils"
)
type ApplicationPropertiesFile struct {
Dir string
File string
}
// hasFramework uses the build.gradle, groupId, and artifactId to check for framework
func hasFramework(configFile, groupId, artifactId string) (bool, error) {
if utils.IsPathOfWantedFile(configFile, "build.gradle") {
return utils.IsTagInFile(configFile, groupId)
} else if artifactId != "" {
return utils.IsTagInPomXMLFileArtifactId(configFile, groupId, artifactId)
} else {
return utils.IsTagInPomXMLFile(configFile, groupId)
}
}
// GetPortsForJBossFrameworks tries to detect any port information inside javaOpts of configuration
// of a given profiles plugin
func GetPortsForJBossFrameworks(pomFilePath, pluginArtifactId, pluginGroupId string) string {
portPlaceholder := ""
pom, err := utils.GetPomFileContent(pomFilePath)
if err != nil {
return portPlaceholder
}
re := regexp.MustCompile(`jboss.https?.port=\d*`)
// Check for port configuration inside profiles
for _, profile := range pom.Profiles.Profile {
for _, plugin := range profile.Build.Plugins.Plugin {
if !(strings.Contains(plugin.ArtifactId, pluginArtifactId) && strings.Contains(plugin.GroupId, pluginGroupId)) {
continue
}
matchIndexesSlice := re.FindAllStringSubmatchIndex(plugin.Configuration.JavaOpts, -1)
for _, matchIndexes := range matchIndexesSlice {
if len(matchIndexes) > 1 {
portPlaceholder = plugin.Configuration.JavaOpts[matchIndexes[0]:matchIndexes[1]]
for _, httpArg := range []string{"jboss.http.port=", "jboss.https.port="} {
portPlaceholder = strings.Replace(portPlaceholder, httpArg, "", -1)
}
}
}
}
}
return portPlaceholder
}
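For illustration, a standalone sketch of what this regex extracts from a plugin's javaOpts (the sample value below is hypothetical, not taken from a real pom.xml):

```go
package example

import "regexp"

// Hypothetical javaOpts value from a wildfly/eap maven plugin profile.
const javaOpts = "-Djboss.http.port=8081 -Djboss.https.port=8443"

// firstJBossPortToken returns the first "jboss.http(s).port=NNNN" token,
// using the same pattern as GetPortsForJBossFrameworks above.
func firstJBossPortToken() string {
	re := regexp.MustCompile(`jboss.https?.port=\d*`)
	return re.FindString(javaOpts) // "jboss.http.port=8081"
}
```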


@@ -14,7 +14,8 @@ package enricher
import (
"context"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type JBossEAPDetector struct{}
@@ -31,5 +32,24 @@ func (o JBossEAPDetector) DoFrameworkDetection(language *model.Language, config
}
func (o JBossEAPDetector) DoPortsDetection(component *model.Component, ctx *context.Context) {
// Not implemented
ports := []int{}
// Fetch the content of xml for this component
paths, err := utils.GetCachedFilePathsFromRoot(component.Path, ctx)
if err != nil {
return
}
pomXML := utils.GetFile(&paths, "pom.xml")
portPlaceholder := GetPortsForJBossFrameworks(pomXML, "eap-maven-plugin", "org.jboss.eap.plugins")
if portPlaceholder == "" {
return
}
if port, err := utils.GetValidPort(portPlaceholder); err == nil {
ports = append(ports, port)
}
if len(ports) > 0 {
component.Ports = ports
return
}
}


@@ -15,8 +15,8 @@ import (
"context"
"os"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"gopkg.in/yaml.v3"
)


@@ -15,8 +15,8 @@ import (
"context"
"encoding/xml"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type OpenLibertyDetector struct{}


@@ -18,8 +18,8 @@ import (
"os"
"path/filepath"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"gopkg.in/yaml.v3"
)


@@ -17,8 +17,8 @@ import (
"io/ioutil"
"path/filepath"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"gopkg.in/yaml.v3"
)


@@ -15,8 +15,8 @@ import (
"context"
"encoding/json"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type VertxDetector struct{}


@@ -14,7 +14,8 @@ package enricher
import (
"context"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type WildFlyDetector struct{}
@@ -30,6 +31,27 @@ func (o WildFlyDetector) DoFrameworkDetection(language *model.Language, config s
}
}
// DoPortsDetection for wildfly fetches the pom.xml and tries to find any javaOpts under
// the wildfly-maven-plugin profiles. If there is one it looks if jboss.http.port is defined.
func (o WildFlyDetector) DoPortsDetection(component *model.Component, ctx *context.Context) {
// Not implemented
ports := []int{}
// Fetch the content of xml for this component
paths, err := utils.GetCachedFilePathsFromRoot(component.Path, ctx)
if err != nil {
return
}
pomXML := utils.GetFile(&paths, "pom.xml")
portPlaceholder := GetPortsForJBossFrameworks(pomXML, "wildfly-maven-plugin", "org.wildfly.plugins")
if portPlaceholder == "" {
return
}
if port, err := utils.GetValidPort(portPlaceholder); err == nil {
ports = append(ports, port)
}
if len(ports) > 0 {
component.Ports = ports
return
}
}


@@ -15,8 +15,8 @@ import (
"context"
"encoding/json"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type AngularCliJson struct {


@@ -18,8 +18,8 @@ import (
"regexp"
"strings"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type ExpressDetector struct{}


@@ -14,8 +14,8 @@ package enricher
import (
"context"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type NextDetector struct{}


@@ -15,8 +15,8 @@ import (
"path/filepath"
"regexp"
"github.com/redhat-developer/alizer/go/pkg/schema"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/schema"
"github.com/devfile/alizer/pkg/utils"
)
type packageScriptFunc func(schema.PackageJson) string


@@ -15,8 +15,8 @@ import (
"context"
"regexp"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type NuxtDetector struct{}


@@ -15,8 +15,8 @@ import (
"context"
"os"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type ReactJsDetector struct{}


@@ -14,8 +14,8 @@ package enricher
import (
"context"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type SvelteDetector struct{}


@@ -15,8 +15,8 @@ import (
"context"
"regexp"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type VueDetector struct{}


@@ -13,7 +13,8 @@ package enricher
import (
"context"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type LaravelDetector struct{}
@@ -29,6 +30,12 @@ func (d LaravelDetector) DoFrameworkDetection(language *model.Language, config s
}
}
// DoPortsDetection for Laravel will check if there is any .env file inside the component
// configuring the APP_PORT variable which is dedicated to port configuration.
func (d LaravelDetector) DoPortsDetection(component *model.Component, ctx *context.Context) {
// Not implemented yet
regexes := []string{`APP_PORT=(\d*)`}
ports := utils.GetPortValuesFromEnvFile(component.Path, regexes)
if len(ports) > 0 {
component.Ports = ports
}
}
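For illustration, a self-contained sketch of the matching idea (the `.env` contents and helper below are hypothetical, not part of the Alizer API):

```go
package example

import "regexp"

// Hypothetical .env contents; only APP_PORT is relevant to this detector.
const envSample = "APP_NAME=demo\nAPP_PORT=8080\n"

// portFromEnv applies the APP_PORT pattern used above and returns the
// first captured group, or "" when no port is configured.
func portFromEnv() string {
	re := regexp.MustCompile(`APP_PORT=(\d*)`)
	if m := re.FindStringSubmatch(envSample); len(m) > 1 {
		return m[1] // "8080"
	}
	return ""
}
```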


@@ -11,7 +11,7 @@
package enricher
import "github.com/redhat-developer/alizer/go/pkg/utils"
import "github.com/devfile/alizer/pkg/utils"
// hasFramework uses the composer.json to check for framework
func hasFramework(configFile string, tag string) bool {


@@ -15,8 +15,8 @@ import (
"context"
"regexp"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type DjangoDetector struct{}

View File

@@ -16,8 +16,8 @@ import (
"regexp"
"strings"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type FlaskDetector struct{}

View File

@@ -12,7 +12,7 @@
package enricher
import (
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/utils"
)
// hasFramework uses all files to check for framework

View File

@@ -16,9 +16,9 @@ import (
"errors"
"io/ioutil"
framework "github.com/redhat-developer/alizer/go/pkg/apis/enricher/framework/go"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
framework "github.com/devfile/alizer/pkg/apis/enricher/framework/go"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"golang.org/x/mod/modfile"
)

View File

@@ -18,9 +18,9 @@ import (
"regexp"
"strings"
framework "github.com/redhat-developer/alizer/go/pkg/apis/enricher/framework/java"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
framework "github.com/devfile/alizer/pkg/apis/enricher/framework/java"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type JavaEnricher struct{}

View File

@@ -16,10 +16,10 @@ import (
"os"
"path/filepath"
framework "github.com/redhat-developer/alizer/go/pkg/apis/enricher/framework/javascript/nodejs"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
langfile "github.com/redhat-developer/alizer/go/pkg/utils/langfiles"
framework "github.com/devfile/alizer/pkg/apis/enricher/framework/javascript/nodejs"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
langfile "github.com/devfile/alizer/pkg/utils/langfiles"
)
type JavaScriptEnricher struct{}

View File

@@ -13,10 +13,10 @@ package enricher
import (
"context"
framework "github.com/redhat-developer/alizer/go/pkg/apis/enricher/framework/php"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
langfile "github.com/redhat-developer/alizer/go/pkg/utils/langfiles"
framework "github.com/devfile/alizer/pkg/apis/enricher/framework/php"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
langfile "github.com/devfile/alizer/pkg/utils/langfiles"
)
type PHPEnricher struct{}

View File

@@ -14,9 +14,9 @@ package enricher
import (
"context"
framework "github.com/redhat-developer/alizer/go/pkg/apis/enricher/framework/python"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
framework "github.com/devfile/alizer/pkg/apis/enricher/framework/python"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
)
type PythonEnricher struct{}

View File

@@ -42,6 +42,12 @@ type Component struct {
Ports []int
}
type Version struct {
SchemaVersion string
Default bool
Version string
}
type DevFileType struct {
Name string
Language string
@@ -49,6 +55,11 @@ type DevFileType struct {
Tags []string
}
type DevfileFilter struct {
MinVersion string
MaxVersion string
}
type ApplicationFileInfo struct {
Dir string
File string

View File

@@ -22,10 +22,10 @@ import (
"regexp"
"strings"
"github.com/redhat-developer/alizer/go/pkg/apis/enricher"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/redhat-developer/alizer/go/pkg/utils/langfiles"
"github.com/devfile/alizer/pkg/apis/enricher"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"github.com/devfile/alizer/pkg/utils/langfiles"
)
func DetectComponentsInRoot(path string) ([]model.Component, error) {

View File

@@ -21,10 +21,13 @@ import (
"regexp"
"strings"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
"github.com/hashicorp/go-version"
)
const MinimumAllowedVersion = "2.0.0"
func SelectDevFilesFromTypes(path string, devFileTypes []model.DevFileType) ([]int, error) {
alizerLogger := utils.GetOrCreateLogger()
ctx := context.Background()
@@ -102,15 +105,31 @@ func SelectDevFileUsingLanguagesFromTypes(languages []model.Language, devFileTyp
return devFilesIndexes[0], nil
}
func MatchDevfiles(path string, url string, filter model.DevfileFilter) ([]model.DevFileType, error) {
alizerLogger := utils.GetOrCreateLogger()
alizerLogger.V(0).Info("Starting devfile matching")
alizerLogger.V(1).Info(fmt.Sprintf("Downloading devfiles from registry %s", url))
devFileTypesFromRegistry, err := downloadDevFileTypesFromRegistry(url, filter)
if err != nil {
return []model.DevFileType{}, err
}
return selectDevfiles(path, devFileTypesFromRegistry)
}
func SelectDevFilesFromRegistry(path string, url string) ([]model.DevFileType, error) {
alizerLogger := utils.GetOrCreateLogger()
alizerLogger.V(0).Info("Starting devfile matching")
alizerLogger.V(1).Info(fmt.Sprintf("Downloading devfiles from registry %s", url))
devFileTypesFromRegistry, err := downloadDevFileTypesFromRegistry(url)
devFileTypesFromRegistry, err := downloadDevFileTypesFromRegistry(url, model.DevfileFilter{MinVersion: "", MaxVersion: ""})
if err != nil {
return []model.DevFileType{}, err
}
alizerLogger.V(1).Info(fmt.Sprintf("Fetched %d devfiles", len(devFileTypesFromRegistry)))
return selectDevfiles(path, devFileTypesFromRegistry)
}
func selectDevfiles(path string, devFileTypesFromRegistry []model.DevFileType) ([]model.DevFileType, error) {
indexes, err := SelectDevFilesFromTypes(path, devFileTypesFromRegistry)
if err != nil {
return []model.DevFileType{}, err
@@ -122,10 +141,11 @@ func SelectDevFilesFromRegistry(path string, url string) ([]model.DevFileType, e
}
return devFileTypes, nil
}
func SelectDevFileFromRegistry(path string, url string) (model.DevFileType, error) {
devFileTypes, err := downloadDevFileTypesFromRegistry(url)
devFileTypes, err := downloadDevFileTypesFromRegistry(url, model.DevfileFilter{MinVersion: "", MaxVersion: ""})
if err != nil {
return model.DevFileType{}, err
}
@@ -137,17 +157,63 @@ func SelectDevFileFromRegistry(path string, url string) (model.DevFileType, erro
return devFileTypes[index], nil
}
func downloadDevFileTypesFromRegistry(url string) ([]model.DevFileType, error) {
url = adaptUrl(url)
// Get the data
resp, err := http.Get(url)
func GetUrlWithVersions(url, minVersion, maxVersion string) (string, error) {
minAllowedVersion, err := version.NewVersion(MinimumAllowedVersion)
if err != nil {
// retry by appending index to url
url = appendIndexPath(url)
resp, err = http.Get(url)
return "", nil
}
if minVersion != "" && maxVersion != "" {
minV, err := version.NewVersion(minVersion)
if err != nil {
return []model.DevFileType{}, err
return url, nil
}
maxV, err := version.NewVersion(maxVersion)
if err != nil {
return url, nil
}
if maxV.LessThan(minV) {
return "", fmt.Errorf("max-version cannot be lower than min-version")
}
if maxV.LessThan(minAllowedVersion) || minV.LessThan(minAllowedVersion) {
return "", fmt.Errorf("min and/or max version are lower than the minimum allowed version (2.0.0)")
}
return fmt.Sprintf("%s?minSchemaVersion=%s&maxSchemaVersion=%s", url, minVersion, maxVersion), nil
} else if minVersion != "" {
minV, err := version.NewVersion(minVersion)
if err != nil {
return "", nil
}
if minV.LessThan(minAllowedVersion) {
return "", fmt.Errorf("min version is lower than the minimum allowed version (2.0.0)")
}
return fmt.Sprintf("%s?minSchemaVersion=%s", url, minVersion), nil
} else if maxVersion != "" {
maxV, err := version.NewVersion(maxVersion)
if err != nil {
return "", nil
}
if maxV.LessThan(minAllowedVersion) {
return "", fmt.Errorf("max version is lower than the minimum allowed version (2.0.0)")
}
return fmt.Sprintf("%s?maxSchemaVersion=%s", url, maxVersion), nil
} else {
return url, nil
}
}
func downloadDevFileTypesFromRegistry(url string, filter model.DevfileFilter) ([]model.DevFileType, error) {
url = adaptUrl(url)
tmpUrl := appendIndexPath(url)
url, err := GetUrlWithVersions(tmpUrl, filter.MinVersion, filter.MaxVersion)
if err != nil {
return nil, err
}
// This value is set by the user in order to configure the registry
resp, err := http.Get(url) // #nosec G107
if err != nil {
return []model.DevFileType{}, err
}
defer func() error {
if err := resp.Body.Close(); err != nil {
@@ -177,9 +243,9 @@ func downloadDevFileTypesFromRegistry(url string) ([]model.DevFileType, error) {
func appendIndexPath(url string) string {
if strings.HasSuffix(url, "/") {
return url + "index"
return url + "v2index"
}
return url + "/index"
return url + "/v2index"
}
func adaptUrl(url string) string {
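
The interplay between GetUrlWithVersions and the new v2index path is easiest to see with concrete values. The snippet below is an illustrative re-implementation rather than a call into the Alizer package, and it is slightly stricter than the hunk above (it returns errors where the original silently falls back); the registry URL is just an example value:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

// minimumAllowedVersion mirrors MinimumAllowedVersion above.
const minimumAllowedVersion = "2.0.0"

// urlWithVersions appends minSchemaVersion/maxSchemaVersion query parameters
// to a registry index URL, rejecting bounds below the minimum allowed version.
func urlWithVersions(url, minVersion, maxVersion string) (string, error) {
	floor := version.Must(version.NewVersion(minimumAllowedVersion))
	check := func(raw string) (*version.Version, error) {
		v, err := version.NewVersion(raw)
		if err != nil {
			return nil, err
		}
		if v.LessThan(floor) {
			return nil, fmt.Errorf("%s is lower than the minimum allowed version (%s)", raw, minimumAllowedVersion)
		}
		return v, nil
	}
	switch {
	case minVersion != "" && maxVersion != "":
		lo, err := check(minVersion)
		if err != nil {
			return "", err
		}
		hi, err := check(maxVersion)
		if err != nil {
			return "", err
		}
		if hi.LessThan(lo) {
			return "", fmt.Errorf("max-version cannot be lower than min-version")
		}
		return fmt.Sprintf("%s?minSchemaVersion=%s&maxSchemaVersion=%s", url, minVersion, maxVersion), nil
	case minVersion != "":
		if _, err := check(minVersion); err != nil {
			return "", err
		}
		return fmt.Sprintf("%s?minSchemaVersion=%s", url, minVersion), nil
	case maxVersion != "":
		if _, err := check(maxVersion); err != nil {
			return "", err
		}
		return fmt.Sprintf("%s?maxSchemaVersion=%s", url, maxVersion), nil
	default:
		return url, nil
	}
}

func main() {
	u, err := urlWithVersions("https://registry.devfile.io/v2index", "2.1.0", "2.2.0")
	fmt.Println(u, err)
	// https://registry.devfile.io/v2index?minSchemaVersion=2.1.0&maxSchemaVersion=2.2.0 <nil>
}
```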

View File

@@ -18,10 +18,10 @@ import (
"sort"
"strings"
"github.com/redhat-developer/alizer/go/pkg/apis/enricher"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/utils"
langfile "github.com/redhat-developer/alizer/go/pkg/utils/langfiles"
"github.com/devfile/alizer/pkg/apis/enricher"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/utils"
langfile "github.com/devfile/alizer/pkg/utils/langfiles"
)
type languageItem struct {

View File

@@ -27,10 +27,13 @@ type Pom struct {
Build struct {
Plugins struct {
Plugin []struct {
GroupId string `xml:"groupId"`
ArtifactId string `xml:"artifactId"`
Version string `xml:"version"`
Scope string `xml:"scope"`
GroupId string `xml:"groupId"`
ArtifactId string `xml:"artifactId"`
Version string `xml:"version"`
Scope string `xml:"scope"`
Configuration struct {
JavaOpts string `xml:"javaOpts"`
} `xml:"configuration"`
} `xml:"plugin"`
} `xml:"plugins"`
} `xml:"build,omitempty"`
@@ -40,10 +43,13 @@ type Pom struct {
Build struct {
Plugins struct {
Plugin []struct {
GroupId string `xml:"groupId"`
ArtifactId string `xml:"artifactId"`
Version string `xml:"version"`
Scope string `xml:"scope"`
GroupId string `xml:"groupId"`
ArtifactId string `xml:"artifactId"`
Version string `xml:"version"`
Scope string `xml:"scope"`
Configuration struct {
JavaOpts string `xml:"javaOpts"`
} `xml:"configuration"`
} `xml:"plugin"`
} `xml:"plugins"`
} `xml:"build,omitempty"`
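The new Configuration/JavaOpts fields map onto nested <configuration><javaOpts> elements. A standalone sketch with encoding/xml, using a trimmed copy of the plugin struct (not the library's full Pom type) and a made-up pom fragment:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Plugin is a trimmed version of the plugin entry shown in the diff above.
type Plugin struct {
	GroupId       string `xml:"groupId"`
	ArtifactId    string `xml:"artifactId"`
	Configuration struct {
		JavaOpts string `xml:"javaOpts"`
	} `xml:"configuration"`
}

func main() {
	data := []byte(`
<plugin>
  <groupId>org.springframework.boot</groupId>
  <artifactId>spring-boot-maven-plugin</artifactId>
  <configuration>
    <javaOpts>-Dserver.port=8085</javaOpts>
  </configuration>
</plugin>`)

	var p Plugin
	if err := xml.Unmarshal(data, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Configuration.JavaOpts) // -Dserver.port=8085
}
```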

View File

@@ -26,10 +26,10 @@ import (
"strconv"
"strings"
"github.com/redhat-developer/alizer/go/pkg/utils/langfiles"
"github.com/devfile/alizer/pkg/utils/langfiles"
"github.com/redhat-developer/alizer/go/pkg/apis/model"
"github.com/redhat-developer/alizer/go/pkg/schema"
"github.com/devfile/alizer/pkg/apis/model"
"github.com/devfile/alizer/pkg/schema"
ignore "github.com/sabhiram/go-gitignore"
)
@@ -92,24 +92,24 @@ func IsTagInFile(file string, tag string) (bool, error) {
}
// IsTagInPomXMLFileArtifactId checks if a pom file contains the artifactId.
func IsTagInPomXMLFileArtifactId(pomFilePath, groupdId, artifactId string) (bool, error) {
func IsTagInPomXMLFileArtifactId(pomFilePath, groupId, artifactId string) (bool, error) {
pom, err := GetPomFileContent(pomFilePath)
if err != nil {
return false, err
}
for _, dependency := range pom.Dependencies.Dependency {
if strings.Contains(dependency.ArtifactId, artifactId) && strings.Contains(dependency.GroupId, groupdId) {
if strings.Contains(dependency.ArtifactId, artifactId) && strings.Contains(dependency.GroupId, groupId) {
return true, nil
}
}
for _, plugin := range pom.Build.Plugins.Plugin {
if strings.Contains(plugin.ArtifactId, artifactId) && strings.Contains(plugin.GroupId, groupdId) {
if strings.Contains(plugin.ArtifactId, artifactId) && strings.Contains(plugin.GroupId, groupId) {
return true, nil
}
}
for _, profile := range pom.Profiles.Profile {
for _, plugin := range profile.Build.Plugins.Plugin {
if strings.Contains(plugin.ArtifactId, artifactId) && strings.Contains(plugin.GroupId, groupdId) {
if strings.Contains(plugin.ArtifactId, artifactId) && strings.Contains(plugin.GroupId, groupId) {
return true, nil
}
}
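With the parameter rename above (groupdId to groupId), a call to the helper reads naturally; this usage sketch assumes the exported signature stays as shown under the new github.com/devfile/alizer/pkg/utils module path, and the pom path and coordinates are made up:

```go
package main

import (
	"fmt"

	"github.com/devfile/alizer/pkg/utils"
)

func main() {
	// Does pom.xml declare the spring-boot-maven-plugin (as a dependency,
	// build plugin, or profile plugin)?
	found, err := utils.IsTagInPomXMLFileArtifactId("pom.xml", "org.springframework.boot", "spring-boot-maven-plugin")
	if err != nil {
		panic(err)
	}
	fmt.Println("artifact declared:", found)
}
```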

View File

@@ -16,7 +16,7 @@ import (
"errors"
"strings"
"github.com/redhat-developer/alizer/go/pkg/schema"
"github.com/devfile/alizer/pkg/schema"
"gopkg.in/yaml.v3"
)

View File

@@ -169,8 +169,8 @@ func Erase(helper Helper, reader io.Reader) error {
return helper.Delete(serverURL)
}
//List returns all the serverURLs of keys in
//the OS store as a list of strings
// List returns all the serverURLs of keys in
// the OS store as a list of strings
func List(helper Helper, writer io.Writer) error {
accts, err := helper.List()
if err != nil {
@@ -179,8 +179,8 @@ func List(helper Helper, writer io.Writer) error {
return json.NewEncoder(writer).Encode(accts)
}
//PrintVersion outputs the current version.
// PrintVersion outputs the current version.
func PrintVersion(writer io.Writer) error {
fmt.Fprintln(writer, Version)
fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version)
return nil
}

View File

@@ -1,4 +1,16 @@
package credentials
// Version holds a string describing the current version
const Version = "0.6.4"
var (
// Name is filled at linking time
Name = ""
// Package is filled at linking time
Package = "github.com/docker/docker-credential-helpers"
// Version holds the complete version number. Filled in at linking time.
Version = "v0.0.0+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.
Revision = ""
)
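With Name, Package, and Version now injected at link time, the new PrintVersion format can be reproduced in a toy program; the variable values and the -ldflags "-X ..." build incantation in the comment are illustrative, not the exact flags used upstream:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// In the real helpers these are set via something like:
//   go build -ldflags "-X github.com/docker/docker-credential-helpers/credentials.Version=v0.7.0"
// Here they are plain defaults so the example runs on its own.
var (
	Name    = "docker-credential-example"
	Package = "github.com/docker/docker-credential-helpers"
	Version = "v0.0.0+unknown"
)

// printVersion mimics the new output format: "Name (Package) Version".
func printVersion(w io.Writer) error {
	_, err := fmt.Fprintf(w, "%s (%s) %s\n", Name, Package, Version)
	return err
}

func main() {
	_ = printVersion(os.Stdout)
}
```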

View File

@@ -2,7 +2,6 @@ package units
import (
"fmt"
"regexp"
"strconv"
"strings"
)
@@ -26,16 +25,17 @@ const (
PiB = 1024 * TiB
)
type unitMap map[string]int64
type unitMap map[byte]int64
var (
decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`)
decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB}
binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB}
)
var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
var (
decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
)
func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) {
i := 0
@@ -89,20 +89,66 @@ func RAMInBytes(size string) (int64, error) {
// Parses the human-readable size string into the amount it represents.
func parseSize(sizeStr string, uMap unitMap) (int64, error) {
matches := sizeRegex.FindStringSubmatch(sizeStr)
if len(matches) != 4 {
// TODO: rewrite to use strings.Cut if there's a space
// once Go < 1.18 is deprecated.
sep := strings.LastIndexAny(sizeStr, "01234567890. ")
if sep == -1 {
// There should be at least a digit.
return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
}
var num, sfx string
if sizeStr[sep] != ' ' {
num = sizeStr[:sep+1]
sfx = sizeStr[sep+1:]
} else {
// Omit the space separator.
num = sizeStr[:sep]
sfx = sizeStr[sep+1:]
}
size, err := strconv.ParseFloat(matches[1], 64)
size, err := strconv.ParseFloat(num, 64)
if err != nil {
return -1, err
}
// Backward compatibility: reject negative sizes.
if size < 0 {
return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
}
unitPrefix := strings.ToLower(matches[3])
if mul, ok := uMap[unitPrefix]; ok {
if len(sfx) == 0 {
return int64(size), nil
}
// Process the suffix.
if len(sfx) > 3 { // Too long.
goto badSuffix
}
sfx = strings.ToLower(sfx)
// Trivial case: b suffix.
if sfx[0] == 'b' {
if len(sfx) > 1 { // no extra characters allowed after b.
goto badSuffix
}
return int64(size), nil
}
// A suffix from the map.
if mul, ok := uMap[sfx[0]]; ok {
size *= float64(mul)
} else {
goto badSuffix
}
// The suffix may have extra "b" or "ib" (e.g. KiB or MB).
switch {
case len(sfx) == 2 && sfx[1] != 'b':
goto badSuffix
case len(sfx) == 3 && sfx[1:] != "ib":
goto badSuffix
}
return int64(size), nil
badSuffix:
return -1, fmt.Errorf("invalid suffix: '%s'", sfx)
}
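The rewritten parseSize keeps the old accepted spellings (optional space, optional b/ib suffix, case-insensitive prefix) while dropping the regexp. A small, hypothetical driver against the public go-units API shows the binary interpretation used by RAMInBytes (the decimal map is used by FromHumanSize):

```go
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

func main() {
	// RAMInBytes treats k/m/g/t/p prefixes as powers of 1024.
	for _, s := range []string{"32", "32b", "32 kb", "64KiB", "1.5gb", "bogus"} {
		if n, err := units.RAMInBytes(s); err != nil {
			fmt.Printf("%-8s -> error: %v\n", s, err)
		} else {
			fmt.Printf("%-8s -> %d bytes\n", s, n)
		}
	}
}
```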

vendor/github.com/go-git/gcfg/.gitignore generated vendored Normal file
View File

@@ -0,0 +1 @@
coverage.out

vendor/github.com/go-git/gcfg/Makefile generated vendored Normal file
View File

@@ -0,0 +1,17 @@
# General
WORKDIR = $(PWD)
# Go parameters
GOCMD = go
GOTEST = $(GOCMD) test
# Coverage
COVERAGE_REPORT = coverage.out
COVERAGE_MODE = count
test:
$(GOTEST) ./...
test-coverage:
echo "" > $(COVERAGE_REPORT); \
$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...

View File

@@ -1,7 +0,0 @@
// +build !go1.2
package gcfg
type textUnmarshaler interface {
UnmarshalText(text []byte) error
}

View File

@@ -1,9 +0,0 @@
// +build go1.2
package gcfg
import (
"encoding"
)
type textUnmarshaler encoding.TextUnmarshaler

View File

@@ -3,16 +3,16 @@ package gcfg
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"gopkg.in/warnings.v0"
"github.com/go-git/gcfg/scanner"
"github.com/go-git/gcfg/token"
"gopkg.in/warnings.v0"
)
var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b'}
var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b', '\n': '\n'}
// no error: invalid literals should be caught by scanner
func unquote(s string) string {
@@ -224,7 +224,7 @@ func readInto(config interface{}, fset *token.FileSet, file *token.File,
//
// If callback returns an error, ReadWithCallback terminates with an error too.
func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error {
src, err := ioutil.ReadAll(reader)
src, err := io.ReadAll(reader)
if err != nil {
return err
}
@@ -239,7 +239,7 @@ func ReadWithCallback(reader io.Reader, callback func(string, string, string, st
// ReadInto reads gcfg formatted data from reader and sets the values into the
// corresponding fields in config.
func ReadInto(config interface{}, reader io.Reader) error {
src, err := ioutil.ReadAll(reader)
src, err := io.ReadAll(reader)
if err != nil {
return err
}
@@ -263,7 +263,7 @@ func ReadFileInto(config interface{}, filename string) error {
return err
}
defer f.Close()
src, err := ioutil.ReadAll(f)
src, err := io.ReadAll(f)
if err != nil {
return err
}

View File

@@ -8,7 +8,6 @@
//
// Note that the API for the scanner package may change to accommodate new
// features or implementation changes in gcfg.
//
package scanner
import (
@@ -16,9 +15,7 @@ import (
"path/filepath"
"unicode"
"unicode/utf8"
)
import (
"github.com/go-git/gcfg/token"
)
@@ -26,13 +23,11 @@ import (
// encountered and a handler was installed, the handler is called with a
// position and an error message. The position points to the beginning of
// the offending token.
//
type ErrorHandler func(pos token.Position, msg string)
// A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data
// structure but must be initialized via Init before use.
//
type Scanner struct {
// immutable state
file *token.File // source file handle
@@ -54,7 +49,6 @@ type Scanner struct {
// Read the next Unicode char into s.ch.
// s.ch < 0 means end-of-file.
//
func (s *Scanner) next() {
if s.rdOffset < len(s.src) {
s.offset = s.rdOffset
@@ -87,7 +81,6 @@ func (s *Scanner) next() {
// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
type Mode uint
const (
@@ -108,7 +101,6 @@ const (
//
// Note that Init may call err if there is an error in the first character
// of the file.
//
func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
// Explicitly initialize all fields since a scanner may be reused.
if file.Size() != len(src) {
@@ -163,12 +155,13 @@ func (s *Scanner) scanIdentifier() string {
return string(s.src[offs:s.offset])
}
// val indicate if we are scanning a value (vs a header)
func (s *Scanner) scanEscape(val bool) {
offs := s.offset
ch := s.ch
s.next() // always make progress
switch ch {
case '\\', '"':
case '\\', '"', '\n':
// ok
case 'n', 't', 'b':
if val {
@@ -289,7 +282,6 @@ func (s *Scanner) skipWhitespace() {
// Scan adds line information to the file added to the file
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
scanAgain:
s.skipWhitespace()

View File

@@ -2,6 +2,7 @@ package gcfg
import (
"bytes"
"encoding"
"encoding/gob"
"fmt"
"math/big"
@@ -10,8 +11,9 @@ import (
"unicode"
"unicode/utf8"
"github.com/go-git/gcfg/types"
"gopkg.in/warnings.v0"
"github.com/go-git/gcfg/types"
)
type tag struct {
@@ -65,7 +67,7 @@ var setters = []setter{
}
func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
dtu, ok := d.(textUnmarshaler)
dtu, ok := d.(encoding.TextUnmarshaler)
if !ok {
return errUnsupportedType
}
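With the build-tag shims gone, gcfg now targets encoding.TextUnmarshaler directly, so any field type implementing that single method can be populated from a config value. A minimal, self-contained illustration (the Level type is invented for the example and is not part of gcfg):

```go
package main

import (
	"encoding"
	"fmt"
	"strings"
)

// Level implements encoding.TextUnmarshaler, which is all gcfg needs.
type Level int

var _ encoding.TextUnmarshaler = (*Level)(nil)

func (l *Level) UnmarshalText(text []byte) error {
	switch strings.ToLower(string(text)) {
	case "debug":
		*l = 0
	case "info":
		*l = 1
	case "error":
		*l = 2
	default:
		return fmt.Errorf("unknown level %q", text)
	}
	return nil
}

func main() {
	var l Level
	if err := l.UnmarshalText([]byte("info")); err != nil {
		panic(err)
	}
	fmt.Println(l) // 1
}
```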

View File

@@ -2,4 +2,5 @@ coverage.out
*~
coverage.txt
profile.out
.tmp/
.tmp/
.git-dist/

View File

@@ -29,10 +29,16 @@ test:
@echo "running against `git version`"; \
$(GOTEST) -race ./...
TEMP_REPO := $(shell mktemp)
test-sha256:
$(GOCMD) run -tags sha256 _examples/sha256/main.go $(TEMP_REPO)
cd $(TEMP_REPO) && git fsck
rm -rf $(TEMP_REPO)
test-coverage:
@echo "running against `git version`"; \
echo "" > $(COVERAGE_REPORT); \
$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
clean:
rm -rf $(GIT_DIST_PATH)
rm -rf $(GIT_DIST_PATH)

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
@@ -59,6 +58,8 @@ type Config struct {
// CommentChar is the character indicating the start of a
// comment for commands like commit and tag
CommentChar string
// RepositoryFormatVersion identifies the repository format and layout version.
RepositoryFormatVersion format.RepositoryFormatVersion
}
User struct {
@@ -96,6 +97,17 @@ type Config struct {
DefaultBranch string
}
Extensions struct {
// ObjectFormat specifies the hash algorithm to use. The
// acceptable values are sha1 and sha256. If not specified,
// sha1 is assumed. It is an error to specify this key unless
// core.repositoryFormatVersion is 1.
//
// This setting must not be changed after repository initialization
// (e.g. clone or init).
ObjectFormat format.ObjectFormat
}
// Remotes list of repository remotes, the key of the map is the name
// of the remote, should equal to RemoteConfig.Name.
Remotes map[string]*RemoteConfig
@@ -131,7 +143,7 @@ func NewConfig() *Config {
// ReadConfig reads a config file from a io.Reader.
func ReadConfig(r io.Reader) (*Config, error) {
b, err := ioutil.ReadAll(r)
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
@@ -226,28 +238,32 @@ func (c *Config) Validate() error {
}
const (
remoteSection = "remote"
submoduleSection = "submodule"
branchSection = "branch"
coreSection = "core"
packSection = "pack"
userSection = "user"
authorSection = "author"
committerSection = "committer"
initSection = "init"
urlSection = "url"
fetchKey = "fetch"
urlKey = "url"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
descriptionKey = "description"
defaultBranchKey = "defaultBranch"
remoteSection = "remote"
submoduleSection = "submodule"
branchSection = "branch"
coreSection = "core"
packSection = "pack"
userSection = "user"
authorSection = "author"
committerSection = "committer"
initSection = "init"
urlSection = "url"
extensionsSection = "extensions"
fetchKey = "fetch"
urlKey = "url"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
descriptionKey = "description"
defaultBranchKey = "defaultBranch"
repositoryFormatVersionKey = "repositoryformatversion"
objectFormat = "objectformat"
mirrorKey = "mirror"
// DefaultPackWindow holds the number of previous objects used to
// generate deltas. The value 10 is the same used by git command.
@@ -391,6 +407,7 @@ func (c *Config) unmarshalInit() {
// Marshal returns Config encoded as a git-config file.
func (c *Config) Marshal() ([]byte, error) {
c.marshalCore()
c.marshalExtensions()
c.marshalUser()
c.marshalPack()
c.marshalRemotes()
@@ -410,12 +427,24 @@ func (c *Config) Marshal() ([]byte, error) {
func (c *Config) marshalCore() {
s := c.Raw.Section(coreSection)
s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare))
if string(c.Core.RepositoryFormatVersion) != "" {
s.SetOption(repositoryFormatVersionKey, string(c.Core.RepositoryFormatVersion))
}
if c.Core.Worktree != "" {
s.SetOption(worktreeKey, c.Core.Worktree)
}
}
func (c *Config) marshalExtensions() {
// Extensions are only supported on Version 1, therefore
// ignore them otherwise.
if c.Core.RepositoryFormatVersion == format.Version_1 {
s := c.Raw.Section(extensionsSection)
s.SetOption(objectFormat, string(c.Extensions.ObjectFormat))
}
}
func (c *Config) marshalUser() {
s := c.Raw.Section(userSection)
if c.User.Name != "" {
@@ -549,6 +578,8 @@ type RemoteConfig struct {
// URLs the URLs of a remote repository. It must be non-empty. Fetch will
// always use the first URL, while push will use all of them.
URLs []string
// Mirror indicates that the repository is a mirror of remote.
Mirror bool
// insteadOfRulesApplied have urls been modified
insteadOfRulesApplied bool
@@ -602,6 +633,7 @@ func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
c.Name = c.raw.Name
c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...)
c.Fetch = fetch
c.Mirror = c.raw.Options.Get(mirrorKey) == "true"
return nil
}
@@ -634,6 +666,10 @@ func (c *RemoteConfig) marshal() *format.Subsection {
c.raw.SetOption(fetchKey, values...)
}
if c.Mirror {
c.raw.SetOption(mirrorKey, strconv.FormatBool(c.Mirror))
}
return c.raw
}
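A hedged round-trip of the new fields: the import paths below follow go-git v5's layout, format.Version_1 is the constant used in the hunk above, and the object format value is written as a plain string conversion since the exact constant name is not shown here. The expected output is approximate:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/config"
	format "github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	cfg := config.NewConfig()
	cfg.Core.RepositoryFormatVersion = format.Version_1
	cfg.Extensions.ObjectFormat = format.ObjectFormat("sha256")

	out, err := cfg.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected to contain something like:
	//   [core]
	//           bare = false
	//           repositoryformatversion = 1
	//   [extensions]
	//           objectformat = sha256
}
```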

Some files were not shown because too many files have changed in this diff.