Go: Bump github.com/operator-framework/api from 0.17.6 to 0.17.7 (#7108)

Bumps [github.com/operator-framework/api](https://github.com/operator-framework/api) from 0.17.6 to 0.17.7.
- [Release notes](https://github.com/operator-framework/api/releases)
- [Changelog](https://github.com/operator-framework/api/blob/master/RELEASE.md)
- [Commits](https://github.com/operator-framework/api/compare/v0.17.6...v0.17.7)

---
updated-dependencies:
- dependency-name: github.com/operator-framework/api
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
commit 765c1e12e9 (parent 868501c3d2)
dependabot[bot] authored 2023-10-09 07:11:27 +00:00, committed by GitHub
38 changed files with 6887 additions and 26 deletions

vendor/github.com/golang/protobuf/jsonpb/decode.go (generated, vendored, new file, 530 lines)

@@ -0,0 +1,530 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jsonpb
import (
"encoding/json"
"errors"
"fmt"
"io"
"math"
"reflect"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapJSONUnmarshalV2 = false
// UnmarshalNext unmarshals the next JSON object from d into m.
func UnmarshalNext(d *json.Decoder, m proto.Message) error {
return new(Unmarshaler).UnmarshalNext(d, m)
}
// Unmarshal unmarshals a JSON object from r into m.
func Unmarshal(r io.Reader, m proto.Message) error {
return new(Unmarshaler).Unmarshal(r, m)
}
// UnmarshalString unmarshals a JSON object from s into m.
func UnmarshalString(s string, m proto.Message) error {
return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
}
// Unmarshaler is a configurable object for converting from a JSON
// representation to a protocol buffer object.
type Unmarshaler struct {
// AllowUnknownFields specifies whether to allow messages to contain
// unknown JSON fields, as opposed to failing to unmarshal.
AllowUnknownFields bool
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
// If unset, the global registry is used by default.
AnyResolver AnyResolver
}
// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
// they are unmarshaled from JSON. Messages that implement this should also
// implement JSONPBMarshaler so that the custom format can be produced.
//
// The JSON unmarshaling must follow the JSON to proto specification:
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBUnmarshaler interface {
UnmarshalJSONPB(*Unmarshaler, []byte) error
}
// Unmarshal unmarshals a JSON object from r into m.
func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
return u.UnmarshalNext(json.NewDecoder(r), m)
}
// UnmarshalNext unmarshals the next JSON object from d into m.
func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
if m == nil {
return errors.New("invalid nil message")
}
// Parse the next JSON object from the stream.
raw := json.RawMessage{}
if err := d.Decode(&raw); err != nil {
return err
}
// Check for custom unmarshalers first since they may not properly
// implement protobuf reflection that the logic below relies on.
if jsu, ok := m.(JSONPBUnmarshaler); ok {
return jsu.UnmarshalJSONPB(u, raw)
}
mr := proto.MessageReflect(m)
// NOTE: For historical reasons, a top-level null is treated as a noop.
// This is incorrect, but kept for compatibility.
if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
return nil
}
if wrapJSONUnmarshalV2 {
// NOTE: If input message is non-empty, we need to preserve merge semantics
// of the old jsonpb implementation. These semantics are not supported by
// the protobuf JSON specification.
isEmpty := true
mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
isEmpty = false // at least one iteration implies non-empty
return false
})
if !isEmpty {
// Perform unmarshaling into a newly allocated, empty message.
mr = mr.New()
// Use a defer to copy all unmarshaled fields into the original message.
dst := proto.MessageReflect(m)
defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
dst.Set(fd, v)
return true
})
}
// Unmarshal using the v2 JSON unmarshaler.
opts := protojson.UnmarshalOptions{
DiscardUnknown: u.AllowUnknownFields,
}
if u.AnyResolver != nil {
opts.Resolver = anyResolver{u.AnyResolver}
}
return opts.Unmarshal(raw, mr.Interface())
} else {
if err := u.unmarshalMessage(mr, raw); err != nil {
return err
}
return protoV2.CheckInitialized(mr.Interface())
}
}
func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
md := m.Descriptor()
fds := md.Fields()
if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
return jsu.UnmarshalJSONPB(u, in)
}
if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
return nil
}
switch wellKnownType(md.FullName()) {
case "Any":
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return err
}
rawTypeURL, ok := jsonObject["@type"]
if !ok {
return errors.New("Any JSON doesn't have '@type'")
}
typeURL, err := unquoteString(string(rawTypeURL))
if err != nil {
return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
}
m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))
var m2 protoreflect.Message
if u.AnyResolver != nil {
mi, err := u.AnyResolver.Resolve(typeURL)
if err != nil {
return err
}
m2 = proto.MessageReflect(mi)
} else {
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
if err != nil {
if err == protoregistry.NotFound {
return fmt.Errorf("could not resolve Any message type: %v", typeURL)
}
return err
}
m2 = mt.New()
}
if wellKnownType(m2.Descriptor().FullName()) != "" {
rawValue, ok := jsonObject["value"]
if !ok {
return errors.New("Any JSON doesn't have 'value'")
}
if err := u.unmarshalMessage(m2, rawValue); err != nil {
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
}
} else {
delete(jsonObject, "@type")
rawJSON, err := json.Marshal(jsonObject)
if err != nil {
return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
}
if err = u.unmarshalMessage(m2, rawJSON); err != nil {
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
}
}
rawWire, err := protoV2.Marshal(m2.Interface())
if err != nil {
return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
}
m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
return nil
case "BoolValue", "BytesValue", "StringValue",
"Int32Value", "UInt32Value", "FloatValue",
"Int64Value", "UInt64Value", "DoubleValue":
fd := fds.ByNumber(1)
v, err := u.unmarshalValue(m.NewField(fd), in, fd)
if err != nil {
return err
}
m.Set(fd, v)
return nil
case "Duration":
v, err := unquoteString(string(in))
if err != nil {
return err
}
d, err := time.ParseDuration(v)
if err != nil {
return fmt.Errorf("bad Duration: %v", err)
}
sec := d.Nanoseconds() / 1e9
nsec := d.Nanoseconds() % 1e9
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
return nil
case "Timestamp":
v, err := unquoteString(string(in))
if err != nil {
return err
}
t, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
return fmt.Errorf("bad Timestamp: %v", err)
}
sec := t.Unix()
nsec := t.Nanosecond()
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
return nil
case "Value":
switch {
case string(in) == "null":
m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
case string(in) == "true":
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
case string(in) == "false":
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
case hasPrefixAndSuffix('"', in, '"'):
s, err := unquoteString(string(in))
if err != nil {
return fmt.Errorf("unrecognized type for Value %q", in)
}
m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
case hasPrefixAndSuffix('[', in, ']'):
v := m.Mutable(fds.ByNumber(6))
return u.unmarshalMessage(v.Message(), in)
case hasPrefixAndSuffix('{', in, '}'):
v := m.Mutable(fds.ByNumber(5))
return u.unmarshalMessage(v.Message(), in)
default:
f, err := strconv.ParseFloat(string(in), 0)
if err != nil {
return fmt.Errorf("unrecognized type for Value %q", in)
}
m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
}
return nil
case "ListValue":
var jsonArray []json.RawMessage
if err := json.Unmarshal(in, &jsonArray); err != nil {
return fmt.Errorf("bad ListValue: %v", err)
}
lv := m.Mutable(fds.ByNumber(1)).List()
for _, raw := range jsonArray {
ve := lv.NewElement()
if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
return err
}
lv.Append(ve)
}
return nil
case "Struct":
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return fmt.Errorf("bad StructValue: %v", err)
}
mv := m.Mutable(fds.ByNumber(1)).Map()
for key, raw := range jsonObject {
kv := protoreflect.ValueOf(key).MapKey()
vv := mv.NewValue()
if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
}
mv.Set(kv, vv)
}
return nil
}
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return err
}
// Handle known fields.
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
if fd.IsWeak() && fd.Message().IsPlaceholder() {
continue // weak reference is not linked in
}
// Search for any raw JSON value associated with this field.
var raw json.RawMessage
name := string(fd.Name())
if fd.Kind() == protoreflect.GroupKind {
name = string(fd.Message().Name())
}
if v, ok := jsonObject[name]; ok {
delete(jsonObject, name)
raw = v
}
name = string(fd.JSONName())
if v, ok := jsonObject[name]; ok {
delete(jsonObject, name)
raw = v
}
field := m.NewField(fd)
// Unmarshal the field value.
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
continue
}
v, err := u.unmarshalValue(field, raw, fd)
if err != nil {
return err
}
m.Set(fd, v)
}
// Handle extension fields.
for name, raw := range jsonObject {
if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
continue
}
// Resolve the extension field by name.
xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
if xt == nil && isMessageSet(md) {
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
}
if xt == nil {
continue
}
delete(jsonObject, name)
fd := xt.TypeDescriptor()
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
}
field := m.NewField(fd)
// Unmarshal the field value.
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
continue
}
v, err := u.unmarshalValue(field, raw, fd)
if err != nil {
return err
}
m.Set(fd, v)
}
if !u.AllowUnknownFields && len(jsonObject) > 0 {
for name := range jsonObject {
return fmt.Errorf("unknown field %q in %v", name, md.FullName())
}
}
return nil
}
func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
if fd.Cardinality() == protoreflect.Repeated {
return false
}
if md := fd.Message(); md != nil {
return md.FullName() == "google.protobuf.Value"
}
if ed := fd.Enum(); ed != nil {
return ed.FullName() == "google.protobuf.NullValue"
}
return false
}
func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
_, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
return ok
}
return false
}
func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
switch {
case fd.IsList():
var jsonArray []json.RawMessage
if err := json.Unmarshal(in, &jsonArray); err != nil {
return v, err
}
lv := v.List()
for _, raw := range jsonArray {
ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
if err != nil {
return v, err
}
lv.Append(ve)
}
return v, nil
case fd.IsMap():
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return v, err
}
kfd := fd.MapKey()
vfd := fd.MapValue()
mv := v.Map()
for key, raw := range jsonObject {
var kv protoreflect.MapKey
if kfd.Kind() == protoreflect.StringKind {
kv = protoreflect.ValueOf(key).MapKey()
} else {
v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
if err != nil {
return v, err
}
kv = v.MapKey()
}
vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
if err != nil {
return v, err
}
mv.Set(kv, vv)
}
return v, nil
default:
return u.unmarshalSingularValue(v, in, fd)
}
}
var nonFinite = map[string]float64{
`"NaN"`: math.NaN(),
`"Infinity"`: math.Inf(+1),
`"-Infinity"`: math.Inf(-1),
}
func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
switch fd.Kind() {
case protoreflect.BoolKind:
return unmarshalValue(in, new(bool))
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
return unmarshalValue(trimQuote(in), new(int32))
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return unmarshalValue(trimQuote(in), new(int64))
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
return unmarshalValue(trimQuote(in), new(uint32))
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return unmarshalValue(trimQuote(in), new(uint64))
case protoreflect.FloatKind:
if f, ok := nonFinite[string(in)]; ok {
return protoreflect.ValueOfFloat32(float32(f)), nil
}
return unmarshalValue(trimQuote(in), new(float32))
case protoreflect.DoubleKind:
if f, ok := nonFinite[string(in)]; ok {
return protoreflect.ValueOfFloat64(float64(f)), nil
}
return unmarshalValue(trimQuote(in), new(float64))
case protoreflect.StringKind:
return unmarshalValue(in, new(string))
case protoreflect.BytesKind:
return unmarshalValue(in, new([]byte))
case protoreflect.EnumKind:
if hasPrefixAndSuffix('"', in, '"') {
vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
if vd == nil {
return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
}
return protoreflect.ValueOfEnum(vd.Number()), nil
}
return unmarshalValue(in, new(protoreflect.EnumNumber))
case protoreflect.MessageKind, protoreflect.GroupKind:
err := u.unmarshalMessage(v.Message(), in)
return v, err
default:
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
}
}
func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
err := json.Unmarshal(in, v)
return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
}
func unquoteString(in string) (out string, err error) {
err = json.Unmarshal([]byte(in), &out)
return out, err
}
func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
return true
}
return false
}
// trimQuote is like unquoteString but simply strips surrounding quotes.
// This is incorrect, but is behavior done by the legacy implementation.
func trimQuote(in []byte) []byte {
if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
in = in[1 : len(in)-1]
}
return in
}
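
For orientation, a minimal sketch of how the Unmarshaler above is typically used, decoding arbitrary JSON into the google.protobuf.Struct well-known type; the JSON literal and field names are made up for illustration and are not part of the vendored file:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Decode an arbitrary JSON object into the Struct well-known type.
	var s structpb.Struct
	u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
	if err := u.Unmarshal(strings.NewReader(`{"animal":"walrus","size":10}`), &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Fields["animal"].GetStringValue()) // walrus
}
```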

vendor/github.com/golang/protobuf/jsonpb/encode.go (generated, vendored, new file, 559 lines)

@@ -0,0 +1,559 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jsonpb
import (
"encoding/json"
"errors"
"fmt"
"io"
"math"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapJSONMarshalV2 = false
// Marshaler is a configurable object for marshaling protocol buffer messages
// to the specified JSON representation.
type Marshaler struct {
// OrigName specifies whether to use the original protobuf name for fields.
OrigName bool
// EnumsAsInts specifies whether to render enum values as integers,
// as opposed to string values.
EnumsAsInts bool
// EmitDefaults specifies whether to render fields with zero values.
EmitDefaults bool
// Indent controls whether the output is compact or not.
// If empty, the output is compact JSON. Otherwise, every JSON object
// entry and JSON array value will be on its own line.
// Each line will be preceded by repeated copies of Indent, where the
// number of copies is the current indentation depth.
Indent string
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
// If unset, the global registry is used by default.
AnyResolver AnyResolver
}
// JSONPBMarshaler is implemented by protobuf messages that customize the
// way they are marshaled to JSON. Messages that implement this should also
// implement JSONPBUnmarshaler so that the custom format can be parsed.
//
// The JSON marshaling must follow the proto to JSON specification:
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBMarshaler interface {
MarshalJSONPB(*Marshaler) ([]byte, error)
}
// Marshal serializes a protobuf message as JSON into w.
func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
b, err := jm.marshal(m)
if len(b) > 0 {
if _, err := w.Write(b); err != nil {
return err
}
}
return err
}
// MarshalToString serializes a protobuf message as JSON in string form.
func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
b, err := jm.marshal(m)
if err != nil {
return "", err
}
return string(b), nil
}
func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
v := reflect.ValueOf(m)
if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
return nil, errors.New("Marshal called with nil")
}
// Check for custom marshalers first since they may not properly
// implement protobuf reflection that the logic below relies on.
if jsm, ok := m.(JSONPBMarshaler); ok {
return jsm.MarshalJSONPB(jm)
}
if wrapJSONMarshalV2 {
opts := protojson.MarshalOptions{
UseProtoNames: jm.OrigName,
UseEnumNumbers: jm.EnumsAsInts,
EmitUnpopulated: jm.EmitDefaults,
Indent: jm.Indent,
}
if jm.AnyResolver != nil {
opts.Resolver = anyResolver{jm.AnyResolver}
}
return opts.Marshal(proto.MessageReflect(m).Interface())
} else {
// Check for unpopulated required fields first.
m2 := proto.MessageReflect(m)
if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
return nil, err
}
w := jsonWriter{Marshaler: jm}
err := w.marshalMessage(m2, "", "")
return w.buf, err
}
}
type jsonWriter struct {
*Marshaler
buf []byte
}
func (w *jsonWriter) write(s string) {
w.buf = append(w.buf, s...)
}
func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
b, err := jsm.MarshalJSONPB(w.Marshaler)
if err != nil {
return err
}
if typeURL != "" {
// we are marshaling this object to an Any type
var js map[string]*json.RawMessage
if err = json.Unmarshal(b, &js); err != nil {
return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
}
turl, err := json.Marshal(typeURL)
if err != nil {
return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
}
js["@type"] = (*json.RawMessage)(&turl)
if b, err = json.Marshal(js); err != nil {
return err
}
}
w.write(string(b))
return nil
}
md := m.Descriptor()
fds := md.Fields()
// Handle well-known types.
const secondInNanos = int64(time.Second / time.Nanosecond)
switch wellKnownType(md.FullName()) {
case "Any":
return w.marshalAny(m, indent)
case "BoolValue", "BytesValue", "StringValue",
"Int32Value", "UInt32Value", "FloatValue",
"Int64Value", "UInt64Value", "DoubleValue":
fd := fds.ByNumber(1)
return w.marshalValue(fd, m.Get(fd), indent)
case "Duration":
const maxSecondsInDuration = 315576000000
// "Generated output always contains 0, 3, 6, or 9 fractional digits,
// depending on required precision."
s := m.Get(fds.ByNumber(1)).Int()
ns := m.Get(fds.ByNumber(2)).Int()
if s < -maxSecondsInDuration || s > maxSecondsInDuration {
return fmt.Errorf("seconds out of range %v", s)
}
if ns <= -secondInNanos || ns >= secondInNanos {
return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
}
if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
return errors.New("signs of seconds and nanos do not match")
}
var sign string
if s < 0 || ns < 0 {
sign, s, ns = "-", -1*s, -1*ns
}
x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, ".000")
w.write(fmt.Sprintf(`"%vs"`, x))
return nil
case "Timestamp":
// "RFC 3339, where generated output will always be Z-normalized
// and uses 0, 3, 6 or 9 fractional digits."
s := m.Get(fds.ByNumber(1)).Int()
ns := m.Get(fds.ByNumber(2)).Int()
if ns < 0 || ns >= secondInNanos {
return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
}
t := time.Unix(s, ns).UTC()
// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
x := t.Format("2006-01-02T15:04:05.000000000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, ".000")
w.write(fmt.Sprintf(`"%vZ"`, x))
return nil
case "Value":
// JSON value; which is a null, number, string, bool, object, or array.
od := md.Oneofs().Get(0)
fd := m.WhichOneof(od)
if fd == nil {
return errors.New("nil Value")
}
return w.marshalValue(fd, m.Get(fd), indent)
case "Struct", "ListValue":
// JSON object or array.
fd := fds.ByNumber(1)
return w.marshalValue(fd, m.Get(fd), indent)
}
w.write("{")
if w.Indent != "" {
w.write("\n")
}
firstField := true
if typeURL != "" {
if err := w.marshalTypeURL(indent, typeURL); err != nil {
return err
}
firstField = false
}
for i := 0; i < fds.Len(); {
fd := fds.Get(i)
if od := fd.ContainingOneof(); od != nil {
fd = m.WhichOneof(od)
i += od.Fields().Len()
if fd == nil {
continue
}
} else {
i++
}
v := m.Get(fd)
if !m.Has(fd) {
if !w.EmitDefaults || fd.ContainingOneof() != nil {
continue
}
if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
}
}
if !firstField {
w.writeComma()
}
if err := w.marshalField(fd, v, indent); err != nil {
return err
}
firstField = false
}
// Handle proto2 extensions.
if md.ExtensionRanges().Len() > 0 {
// Collect a sorted list of all extension descriptor and values.
type ext struct {
desc protoreflect.FieldDescriptor
val protoreflect.Value
}
var exts []ext
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
exts = append(exts, ext{fd, v})
}
return true
})
sort.Slice(exts, func(i, j int) bool {
return exts[i].desc.Number() < exts[j].desc.Number()
})
for _, ext := range exts {
if !firstField {
w.writeComma()
}
if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
return err
}
firstField = false
}
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
}
w.write("}")
return nil
}
func (w *jsonWriter) writeComma() {
if w.Indent != "" {
w.write(",\n")
} else {
w.write(",")
}
}
func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
// "If the Any contains a value that has a special JSON mapping,
// it will be converted as follows: {"@type": xxx, "value": yyy}.
// Otherwise, the value will be converted into a JSON object,
// and the "@type" field will be inserted to indicate the actual data type."
md := m.Descriptor()
typeURL := m.Get(md.Fields().ByNumber(1)).String()
rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
var m2 protoreflect.Message
if w.AnyResolver != nil {
mi, err := w.AnyResolver.Resolve(typeURL)
if err != nil {
return err
}
m2 = proto.MessageReflect(mi)
} else {
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
if err != nil {
return err
}
m2 = mt.New()
}
if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
return err
}
if wellKnownType(m2.Descriptor().FullName()) == "" {
return w.marshalMessage(m2, indent, typeURL)
}
w.write("{")
if w.Indent != "" {
w.write("\n")
}
if err := w.marshalTypeURL(indent, typeURL); err != nil {
return err
}
w.writeComma()
if w.Indent != "" {
w.write(indent)
w.write(w.Indent)
w.write(`"value": `)
} else {
w.write(`"value":`)
}
if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
return err
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
}
w.write("}")
return nil
}
func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
if w.Indent != "" {
w.write(indent)
w.write(w.Indent)
}
w.write(`"@type":`)
if w.Indent != "" {
w.write(" ")
}
b, err := json.Marshal(typeURL)
if err != nil {
return err
}
w.write(string(b))
return nil
}
// marshalField writes field description and value to the Writer.
func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
if w.Indent != "" {
w.write(indent)
w.write(w.Indent)
}
w.write(`"`)
switch {
case fd.IsExtension():
// For message set, use the fname of the message as the extension name.
name := string(fd.FullName())
if isMessageSet(fd.ContainingMessage()) {
name = strings.TrimSuffix(name, ".message_set_extension")
}
w.write("[" + name + "]")
case w.OrigName:
name := string(fd.Name())
if fd.Kind() == protoreflect.GroupKind {
name = string(fd.Message().Name())
}
w.write(name)
default:
w.write(string(fd.JSONName()))
}
w.write(`":`)
if w.Indent != "" {
w.write(" ")
}
return w.marshalValue(fd, v, indent)
}
func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
switch {
case fd.IsList():
w.write("[")
comma := ""
lv := v.List()
for i := 0; i < lv.Len(); i++ {
w.write(comma)
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
w.write(w.Indent)
}
if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
return err
}
comma = ","
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
}
w.write("]")
return nil
case fd.IsMap():
kfd := fd.MapKey()
vfd := fd.MapValue()
mv := v.Map()
// Collect a sorted list of all map keys and values.
type entry struct{ key, val protoreflect.Value }
var entries []entry
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
entries = append(entries, entry{k.Value(), v})
return true
})
sort.Slice(entries, func(i, j int) bool {
switch kfd.Kind() {
case protoreflect.BoolKind:
return !entries[i].key.Bool() && entries[j].key.Bool()
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return entries[i].key.Int() < entries[j].key.Int()
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return entries[i].key.Uint() < entries[j].key.Uint()
case protoreflect.StringKind:
return entries[i].key.String() < entries[j].key.String()
default:
panic("invalid kind")
}
})
w.write(`{`)
comma := ""
for _, entry := range entries {
w.write(comma)
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
w.write(w.Indent)
}
s := fmt.Sprint(entry.key.Interface())
b, err := json.Marshal(s)
if err != nil {
return err
}
w.write(string(b))
w.write(`:`)
if w.Indent != "" {
w.write(` `)
}
if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
return err
}
comma = ","
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
}
w.write(`}`)
return nil
default:
return w.marshalSingularValue(fd, v, indent)
}
}
func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
switch {
case !v.IsValid():
w.write("null")
return nil
case fd.Message() != nil:
return w.marshalMessage(v.Message(), indent+w.Indent, "")
case fd.Enum() != nil:
if fd.Enum().FullName() == "google.protobuf.NullValue" {
w.write("null")
return nil
}
vd := fd.Enum().Values().ByNumber(v.Enum())
if vd == nil || w.EnumsAsInts {
w.write(strconv.Itoa(int(v.Enum())))
} else {
w.write(`"` + string(vd.Name()) + `"`)
}
return nil
default:
switch v.Interface().(type) {
case float32, float64:
switch {
case math.IsInf(v.Float(), +1):
w.write(`"Infinity"`)
return nil
case math.IsInf(v.Float(), -1):
w.write(`"-Infinity"`)
return nil
case math.IsNaN(v.Float()):
w.write(`"NaN"`)
return nil
}
case int64, uint64:
w.write(fmt.Sprintf(`"%d"`, v.Interface()))
return nil
}
b, err := json.Marshal(v.Interface())
if err != nil {
return err
}
w.write(string(b))
return nil
}
}
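
And the matching direction, a minimal sketch using the Marshaler above to render a well-known Timestamp in its RFC 3339 JSON form; the fixed Unix time is chosen purely for illustration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/jsonpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	m := &jsonpb.Marshaler{}
	out, err := m.MarshalToString(timestamppb.New(time.Unix(1700000000, 0)))
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // "2023-11-14T22:13:20Z" (quoted, Z-normalized, trailing zeros trimmed)
}
```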

vendor/github.com/golang/protobuf/jsonpb/json.go (generated, vendored, new file, 69 lines)

@@ -0,0 +1,69 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jsonpb provides functionality to marshal and unmarshal between a
// protocol buffer message and JSON. It follows the specification at
// https://developers.google.com/protocol-buffers/docs/proto3#json.
//
// Do not rely on the default behavior of the standard encoding/json package
// when called on generated message types as it does not operate correctly.
//
// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
// package instead.
package jsonpb
import (
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoimpl"
)
// AnyResolver takes a type URL, present in an Any message,
// and resolves it into an instance of the associated message.
type AnyResolver interface {
Resolve(typeURL string) (proto.Message, error)
}
type anyResolver struct{ AnyResolver }
func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
return r.FindMessageByURL(string(message))
}
func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
m, err := r.Resolve(url)
if err != nil {
return nil, err
}
return protoimpl.X.MessageTypeOf(m), nil
}
func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
return protoregistry.GlobalTypes.FindExtensionByName(field)
}
func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
func wellKnownType(s protoreflect.FullName) string {
if s.Parent() == "google.protobuf" {
switch s.Name() {
case "Empty", "Any",
"BoolValue", "BytesValue", "StringValue",
"Int32Value", "UInt32Value", "FloatValue",
"Int64Value", "UInt64Value", "DoubleValue",
"Duration", "Timestamp",
"NullValue", "Struct", "Value", "ListValue":
return string(s.Name())
}
}
return ""
}
func isMessageSet(md protoreflect.MessageDescriptor) bool {
ms, ok := md.(interface{ IsMessageSet() bool })
return ok && ms.IsMessageSet()
}
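
As a sketch of the AnyResolver hook defined above, a hypothetical resolver that only recognizes google.protobuf.Struct; the type-name check and error text are illustrative assumptions, not part of the vendored code:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

// structOnlyResolver resolves Any type URLs, but only for google.protobuf.Struct.
type structOnlyResolver struct{}

func (structOnlyResolver) Resolve(typeURL string) (proto.Message, error) {
	name := typeURL
	if i := strings.LastIndex(name, "/"); i >= 0 {
		name = name[i+1:] // strip a "type.googleapis.com/" style prefix
	}
	if name == "google.protobuf.Struct" {
		return &structpb.Struct{}, nil
	}
	return nil, fmt.Errorf("unknown type URL %q", typeURL)
}

func main() {
	// Both Marshaler and Unmarshaler accept the resolver via their AnyResolver field.
	_ = &jsonpb.Unmarshaler{AnyResolver: structOnlyResolver{}}
	_ = &jsonpb.Marshaler{AnyResolver: structOnlyResolver{}}
}
```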


@@ -9,7 +9,7 @@ the last thing you want from your Logging library (again...).
This does not mean Logrus is dead. Logrus will continue to be maintained for
security, (backwards compatible) bug fixes, and performance (where we are
limited by the interface).
limited by the interface).
I believe Logrus' biggest contribution is to have played a part in today's
widespread use of structured logging in Golang. There doesn't seem to be a
@@ -43,7 +43,7 @@ plain text):
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```json
```text
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
@@ -99,7 +99,7 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr
```
Note that this does add measurable overhead - the cost will depend on the version of Go, but is
between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
environment via benchmarks:
environment via benchmarks:
```
go test -bench=.*CallerTracing
```
@@ -317,6 +317,8 @@ log.SetLevel(log.InfoLevel)
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging).
#### Entries
Besides the fields added with `WithField` or `WithFields` some fields are
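
For context, a minimal sketch of the logrus usage the README excerpt above refers to; the field values echo the README's walrus example and are illustrative only:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// JSON output for easy parsing by logstash or Splunk, as described above.
	log.SetFormatter(&log.JSONFormatter{})
	log.SetLevel(log.InfoLevel)
	log.WithFields(log.Fields{
		"animal": "walrus",
		"size":   10,
	}).Info("A group of walrus emerges from the ocean")
}
```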


@@ -1,4 +1,4 @@
// Copyright 2020 Google LLC
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.12.2
// protoc v3.21.9
// source: google/rpc/status.proto
package status
@@ -48,11 +48,13 @@ type Status struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
// The status code, which should be an enum value of
// [google.rpc.Code][google.rpc.Code].
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
// by the client.
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
// A list of messages that carry the error details. There is a common set of
// message types for APIs to use.

vendor/google.golang.org/grpc/attributes/attributes.go (generated, vendored, new file, 101 lines)

@@ -0,0 +1,101 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package attributes defines a generic key/value store used in various gRPC
// components.
//
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
package attributes
// Attributes is an immutable struct for storing and retrieving generic
// key/value pairs. Keys must be hashable, and users should define their own
// types for keys. Values should not be modified after they are added to an
// Attributes or if they were received from one. If values implement 'Equal(o
// interface{}) bool', it will be called by (*Attributes).Equal to determine
// whether two values with the same key should be considered equal.
type Attributes struct {
m map[interface{}]interface{}
}
// New returns a new Attributes containing the key/value pair.
func New(key, value interface{}) *Attributes {
return &Attributes{m: map[interface{}]interface{}{key: value}}
}
// WithValue returns a new Attributes containing the previous keys and values
// and the new key/value pair. If the same key appears multiple times, the
// last value overwrites all previous values for that key. To remove an
// existing key, use a nil value. value should not be modified later.
func (a *Attributes) WithValue(key, value interface{}) *Attributes {
if a == nil {
return New(key, value)
}
n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)}
for k, v := range a.m {
n.m[k] = v
}
n.m[key] = value
return n
}
// Value returns the value associated with these attributes for key, or nil if
// no value is associated with key. The returned value should not be modified.
func (a *Attributes) Value(key interface{}) interface{} {
if a == nil {
return nil
}
return a.m[key]
}
// Equal returns whether a and o are equivalent. If 'Equal(o interface{})
// bool' is implemented for a value in the attributes, it is called to
// determine if the value matches the one stored in the other attributes. If
// Equal is not implemented, standard equality is used to determine if the two
// values are equal. Note that some types (e.g. maps) aren't comparable by
// default, so they must be wrapped in a struct, or in an alias type, with Equal
// defined.
func (a *Attributes) Equal(o *Attributes) bool {
if a == nil && o == nil {
return true
}
if a == nil || o == nil {
return false
}
if len(a.m) != len(o.m) {
return false
}
for k, v := range a.m {
ov, ok := o.m[k]
if !ok {
// o missing element of a
return false
}
if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok {
if !eq.Equal(ov) {
return false
}
} else if v != ov {
// Fallback to a standard equality check if Value is unimplemented.
return false
}
}
return true
}
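
A minimal sketch of the attributes API above; the key type and values are made up for illustration:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// Keys should be package-defined types so that they cannot collide.
type regionKey struct{}

func main() {
	a := attributes.New(regionKey{}, "us-east-1")
	a = a.WithValue(regionKey{}, "eu-west-1") // the last value for a key wins
	fmt.Println(a.Value(regionKey{}))         // eu-west-1
}
```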


@@ -18,7 +18,15 @@
package codes
import "strconv"
import (
"strconv"
"google.golang.org/grpc/internal"
)
func init() {
internal.CanonicalString = canonicalString
}
func (c Code) String() string {
switch c {
@@ -60,3 +68,44 @@ func (c Code) String() string {
return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
}
}
func canonicalString(c Code) string {
switch c {
case OK:
return "OK"
case Canceled:
return "CANCELLED"
case Unknown:
return "UNKNOWN"
case InvalidArgument:
return "INVALID_ARGUMENT"
case DeadlineExceeded:
return "DEADLINE_EXCEEDED"
case NotFound:
return "NOT_FOUND"
case AlreadyExists:
return "ALREADY_EXISTS"
case PermissionDenied:
return "PERMISSION_DENIED"
case ResourceExhausted:
return "RESOURCE_EXHAUSTED"
case FailedPrecondition:
return "FAILED_PRECONDITION"
case Aborted:
return "ABORTED"
case OutOfRange:
return "OUT_OF_RANGE"
case Unimplemented:
return "UNIMPLEMENTED"
case Internal:
return "INTERNAL"
case Unavailable:
return "UNAVAILABLE"
case DataLoss:
return "DATA_LOSS"
case Unauthenticated:
return "UNAUTHENTICATED"
default:
return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
}
}
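
A small illustration of the two spellings: Code.String keeps the existing Go-style names, while the canonicalString added above produces the SCREAMING_SNAKE form that gRPC reaches through the internal.CanonicalString hook registered in init (printed values follow the switch statements above):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	// Code.String is the exported, Go-style rendering.
	fmt.Println(codes.DeadlineExceeded.String()) // DeadlineExceeded
	fmt.Println(codes.NotFound)                  // NotFound
	// The canonical forms ("DEADLINE_EXCEEDED", "NOT_FOUND") are unexported and
	// only reachable through the internal.CanonicalString hook set in init above.
}
```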


@@ -0,0 +1,94 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package connectivity defines connectivity semantics.
// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
package connectivity
import (
"google.golang.org/grpc/grpclog"
)
var logger = grpclog.Component("core")
// State indicates the state of connectivity.
// It can be the state of a ClientConn or SubConn.
type State int
func (s State) String() string {
switch s {
case Idle:
return "IDLE"
case Connecting:
return "CONNECTING"
case Ready:
return "READY"
case TransientFailure:
return "TRANSIENT_FAILURE"
case Shutdown:
return "SHUTDOWN"
default:
logger.Errorf("unknown connectivity state: %d", s)
return "INVALID_STATE"
}
}
const (
// Idle indicates the ClientConn is idle.
Idle State = iota
// Connecting indicates the ClientConn is connecting.
Connecting
// Ready indicates the ClientConn is ready for work.
Ready
// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
TransientFailure
// Shutdown indicates the ClientConn has started shutting down.
Shutdown
)
// ServingMode indicates the current mode of operation of the server.
//
// Only xDS enabled gRPC servers currently report their serving mode.
type ServingMode int
const (
// ServingModeStarting indicates that the server is starting up.
ServingModeStarting ServingMode = iota
// ServingModeServing indicates that the server contains all required
// configuration and is serving RPCs.
ServingModeServing
// ServingModeNotServing indicates that the server is not accepting new
// connections. Existing connections will be closed gracefully, allowing
// in-progress RPCs to complete. A server enters this mode when it does not
// contain the required configuration to serve RPCs.
ServingModeNotServing
)
func (s ServingMode) String() string {
switch s {
case ServingModeStarting:
return "STARTING"
case ServingModeServing:
return "SERVING"
case ServingModeNotServing:
return "NOT_SERVING"
default:
logger.Errorf("unknown serving mode: %d", s)
return "INVALID_MODE"
}
}
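
A minimal sketch of the String methods defined above:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/connectivity"
)

func main() {
	fmt.Println(connectivity.Connecting)         // CONNECTING
	fmt.Println(connectivity.TransientFailure)   // TRANSIENT_FAILURE
	fmt.Println(connectivity.ServingModeServing) // SERVING
}
```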


@@ -0,0 +1,291 @@
/*
*
* Copyright 2014 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package credentials implements various credentials supported by gRPC library,
// which encapsulate all the state needed by a client to authenticate with a
// server and make various assertions, e.g., about the client's identity, role,
// or whether it is authorized to make a particular call.
package credentials // import "google.golang.org/grpc/credentials"
import (
"context"
"errors"
"fmt"
"net"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc/attributes"
icredentials "google.golang.org/grpc/internal/credentials"
)
// PerRPCCredentials defines the common interface for the credentials which need to
// attach security information to every RPC (e.g., oauth2).
type PerRPCCredentials interface {
// GetRequestMetadata gets the current request metadata, refreshing tokens
// if required. This should be called by the transport layer on each
// request, and the data should be populated in headers or other
// context. If a status code is returned, it will be used as the status for
// the RPC (restricted to an allowable set of codes as defined by gRFC
// A54). uri is the URI of the entry point for the request. When supported
// by the underlying implementation, ctx can be used for timeout and
// cancellation. Additionally, RequestInfo data will be available via ctx
// to this call. TODO(zhaoq): Define the set of the qualified keys instead
// of leaving it as an arbitrary string.
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
// RequireTransportSecurity indicates whether the credentials requires
// transport security.
RequireTransportSecurity() bool
}
// SecurityLevel defines the protection level on an established connection.
//
// This API is experimental.
type SecurityLevel int
const (
// InvalidSecurityLevel indicates an invalid security level.
// The zero SecurityLevel value is invalid for backward compatibility.
InvalidSecurityLevel SecurityLevel = iota
// NoSecurity indicates a connection is insecure.
NoSecurity
// IntegrityOnly indicates a connection only provides integrity protection.
IntegrityOnly
// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
PrivacyAndIntegrity
)
// String returns SecurityLevel in a string format.
func (s SecurityLevel) String() string {
switch s {
case NoSecurity:
return "NoSecurity"
case IntegrityOnly:
return "IntegrityOnly"
case PrivacyAndIntegrity:
return "PrivacyAndIntegrity"
}
return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
}
// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
// It should be embedded in a struct implementing AuthInfo to provide additional information
// about the credentials.
//
// This API is experimental.
type CommonAuthInfo struct {
SecurityLevel SecurityLevel
}
// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct.
func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo {
return c
}
// ProtocolInfo provides information regarding the gRPC wire protocol version,
// security protocol, security protocol version in use, server name, etc.
type ProtocolInfo struct {
// ProtocolVersion is the gRPC wire protocol version.
ProtocolVersion string
// SecurityProtocol is the security protocol in use.
SecurityProtocol string
// SecurityVersion is the security protocol version. It is a static version string from the
// credentials, not a value that reflects per-connection protocol negotiation. To retrieve
// details about the credentials used for a connection, use the Peer's AuthInfo field instead.
//
// Deprecated: please use Peer.AuthInfo.
SecurityVersion string
// ServerName is the user-configured server name.
ServerName string
}
// AuthInfo defines the common interface for the auth information the users are interested in.
// A struct that implements AuthInfo should embed CommonAuthInfo by including additional
// information about the credentials in it.
type AuthInfo interface {
AuthType() string
}
// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
// and the caller should not close rawConn.
var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
// TransportCredentials defines the common interface for all the live gRPC wire
// protocols and supported transport security protocols (e.g., TLS, SSL).
type TransportCredentials interface {
// ClientHandshake does the authentication handshake specified by the
// corresponding authentication protocol on rawConn for clients. It returns
// the authenticated connection and the corresponding auth information
// about the connection. The auth information should embed CommonAuthInfo
// to return additional information about the credentials. Implementations
// must use the provided context to implement timely cancellation. gRPC
// will try to reconnect if the error returned is a temporary error
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the
// returned error is a wrapper error, implementations should make sure that
// the error implements Temporary() to have the correct retry behaviors.
// Additionally, ClientHandshakeInfo data will be available via the context
// passed to this call.
//
// The second argument to this method is the `:authority` header value used
// while creating new streams on this connection after authentication
// succeeds. Implementations must use this as the server name during the
// authentication handshake.
//
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
// ServerHandshake does the authentication handshake for servers. It returns
// the authenticated connection and the corresponding auth information about
// the connection. The auth information should embed CommonAuthInfo to return additional information
// about the credentials.
//
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
// Info provides the ProtocolInfo of this TransportCredentials.
Info() ProtocolInfo
// Clone makes a copy of this TransportCredentials.
Clone() TransportCredentials
// OverrideServerName specifies the value used for the following:
// - verifying the hostname on the returned certificates
// - as SNI in the client's handshake to support virtual hosting
// - as the value for `:authority` header at stream creation time
//
// Deprecated: use grpc.WithAuthority instead. Will be supported
// throughout 1.x.
OverrideServerName(string) error
}
// Bundle is a combination of TransportCredentials and PerRPCCredentials.
//
// It also contains a mode switching method, so it can be used as a combination
// of different credential policies.
//
// Bundle cannot be used together with individual TransportCredentials.
// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials.
//
// This API is experimental.
type Bundle interface {
// TransportCredentials returns the transport credentials from the Bundle.
//
// Implementations must return non-nil transport credentials. If transport
// security is not needed by the Bundle, implementations may choose to
// return insecure.NewCredentials().
TransportCredentials() TransportCredentials
// PerRPCCredentials returns the per-RPC credentials from the Bundle.
//
// May be nil if per-RPC credentials are not needed.
PerRPCCredentials() PerRPCCredentials
// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
// existing Bundle may cause races.
//
// NewWithMode returns nil if the requested mode is not supported.
NewWithMode(mode string) (Bundle, error)
}
// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
//
// This API is experimental.
type RequestInfo struct {
// The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
Method string
// AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
AuthInfo AuthInfo
}
// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
//
// This API is experimental.
func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo)
return ri, ok
}
// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
// balancer etc. Individual credential implementations control the actual
// format of the data that they are willing to receive.
//
// This API is experimental.
type ClientHandshakeInfo struct {
// Attributes contains the attributes for the address. It could be provided
// by the gRPC, resolver, balancer etc.
Attributes *attributes.Attributes
}
// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
// in ctx.
//
// This API is experimental.
func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo)
return chi
}
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
//
// This API is experimental.
func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error {
type internalInfo interface {
GetCommonAuthInfo() CommonAuthInfo
}
if ai == nil {
return errors.New("AuthInfo is nil")
}
if ci, ok := ai.(internalInfo); ok {
// CommonAuthInfo.SecurityLevel has an invalid value.
if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel {
return nil
}
if ci.GetCommonAuthInfo().SecurityLevel < level {
return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
}
}
// The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
return nil
}
// ChannelzSecurityInfo defines the interface that security protocols should implement
// in order to provide security info to channelz.
//
// This API is experimental.
type ChannelzSecurityInfo interface {
GetSecurityValue() ChannelzSecurityValue
}
// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
// and *OtherChannelzSecurityValue.
//
// This API is experimental.
type ChannelzSecurityValue interface {
isChannelzSecurityValue()
}
// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
// from GetSecurityValue(), which contains protocol specific security info. Note
// the Value field will be sent to users of channelz requesting channel info, and
// thus sensitive info should better be avoided.
//
// This API is experimental.
type OtherChannelzSecurityValue struct {
ChannelzSecurityValue
Name string
Value proto.Message
}
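
To illustrate the PerRPCCredentials contract defined above, a hypothetical implementation that attaches a static bearer token to every RPC; the metadata key and token handling are assumptions for illustration, not values mandated by gRPC:

```go
package main

import (
	"context"

	"google.golang.org/grpc/credentials"
)

// staticToken sends the same bearer token with every RPC.
type staticToken struct{ token string }

func (s staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + s.token}, nil
}

// RequireTransportSecurity restricts this credential to encrypted transports.
func (s staticToken) RequireTransportSecurity() bool { return true }

// Compile-time check that staticToken satisfies the interface defined above.
var _ credentials.PerRPCCredentials = staticToken{}

func main() {}
```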

vendor/google.golang.org/grpc/credentials/tls.go (generated, vendored, new file, 236 lines)

@@ -0,0 +1,236 @@
/*
*
* Copyright 2014 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"net/url"
"os"
credinternal "google.golang.org/grpc/internal/credentials"
)
// TLSInfo contains the auth information for a TLS authenticated connection.
// It implements the AuthInfo interface.
type TLSInfo struct {
State tls.ConnectionState
CommonAuthInfo
// This API is experimental.
SPIFFEID *url.URL
}
// AuthType returns the type of TLSInfo as a string.
func (t TLSInfo) AuthType() string {
return "tls"
}
// GetSecurityValue returns security info requested by channelz.
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
v := &TLSChannelzSecurityValue{
StandardName: cipherSuiteLookup[t.State.CipherSuite],
}
// Currently there's no way to get LocalCertificate info from tls package.
if len(t.State.PeerCertificates) > 0 {
v.RemoteCertificate = t.State.PeerCertificates[0].Raw
}
return v
}
// tlsCreds is the credentials required for authenticating a connection using TLS.
type tlsCreds struct {
// TLS configuration
config *tls.Config
}
func (c tlsCreds) Info() ProtocolInfo {
return ProtocolInfo{
SecurityProtocol: "tls",
SecurityVersion: "1.2",
ServerName: c.config.ServerName,
}
}
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
// use local cfg to avoid clobbering ServerName if using multiple endpoints
cfg := credinternal.CloneTLSConfig(c.config)
if cfg.ServerName == "" {
serverName, _, err := net.SplitHostPort(authority)
if err != nil {
// If the authority had no host port or if the authority cannot be parsed, use it as-is.
serverName = authority
}
cfg.ServerName = serverName
}
conn := tls.Client(rawConn, cfg)
errChannel := make(chan error, 1)
go func() {
errChannel <- conn.Handshake()
close(errChannel)
}()
select {
case err := <-errChannel:
if err != nil {
conn.Close()
return nil, nil, err
}
case <-ctx.Done():
conn.Close()
return nil, nil, ctx.Err()
}
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: CommonAuthInfo{
SecurityLevel: PrivacyAndIntegrity,
},
}
id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
if id != nil {
tlsInfo.SPIFFEID = id
}
return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
}
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
conn := tls.Server(rawConn, c.config)
if err := conn.Handshake(); err != nil {
conn.Close()
return nil, nil, err
}
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: CommonAuthInfo{
SecurityLevel: PrivacyAndIntegrity,
},
}
id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
if id != nil {
tlsInfo.SPIFFEID = id
}
return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
}
func (c *tlsCreds) Clone() TransportCredentials {
return NewTLS(c.config)
}
func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
c.config.ServerName = serverNameOverride
return nil
}
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
return tc
}
// NewClientTLSFromCert constructs TLS credentials from the provided root
// certificate authority certificate(s) to validate server connections. If
// certificates to establish the identity of the client need to be included in
// the credentials (eg: for mTLS), use NewTLS instead, where a complete
// tls.Config can be specified.
// serverNameOverride is for testing only. If set to a non-empty string,
// it will override the virtual host name of authority (e.g. :authority header
// field) in requests.
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
}
// NewClientTLSFromFile constructs TLS credentials from the provided root
// certificate authority certificate file(s) to validate server connections. If
// certificates to establish the identity of the client need to be included in
// the credentials (eg: for mTLS), use NewTLS instead, where a complete
// tls.Config can be specified.
// serverNameOverride is for testing only. If set to a non-empty string,
// it will override the virtual host name of authority (e.g. :authority header
// field) in requests.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
b, err := os.ReadFile(certFile)
if err != nil {
return nil, err
}
cp := x509.NewCertPool()
if !cp.AppendCertsFromPEM(b) {
return nil, fmt.Errorf("credentials: failed to append certificates")
}
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
}
// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
}
// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
// file for server.
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return nil, err
}
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
}
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type TLSChannelzSecurityValue struct {
ChannelzSecurityValue
StandardName string
LocalCertificate []byte
RemoteCertificate []byte
}
var cipherSuiteLookup = map[uint16]string{
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256",
tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384",
tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256",
}
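
A minimal usage sketch for the constructors above, assuming a hypothetical CA bundle at testdata/ca.pem and a server listening on localhost:50051; the credentials are handed to grpc.Dial via WithTransportCredentials.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Build client-side TLS credentials from a root CA file (hypothetical path).
	creds, err := credentials.NewClientTLSFromFile("testdata/ca.pem", "")
	if err != nil {
		log.Fatalf("failed to load CA certificate: %v", err)
	}
	// Dial the server over TLS; NewTLS could be used instead with a full
	// tls.Config (e.g. for mTLS).
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()
}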

117
vendor/google.golang.org/grpc/grpclog/component.go generated vendored Normal file
View File

@@ -0,0 +1,117 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpclog
import (
"fmt"
"google.golang.org/grpc/internal/grpclog"
)
// componentData records the settings for a component.
type componentData struct {
name string
}
var cache = map[string]*componentData{}
func (c *componentData) InfoDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.InfoDepth(depth+1, args...)
}
func (c *componentData) WarningDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.WarningDepth(depth+1, args...)
}
func (c *componentData) ErrorDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.ErrorDepth(depth+1, args...)
}
func (c *componentData) FatalDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.FatalDepth(depth+1, args...)
}
func (c *componentData) Info(args ...interface{}) {
c.InfoDepth(1, args...)
}
func (c *componentData) Warning(args ...interface{}) {
c.WarningDepth(1, args...)
}
func (c *componentData) Error(args ...interface{}) {
c.ErrorDepth(1, args...)
}
func (c *componentData) Fatal(args ...interface{}) {
c.FatalDepth(1, args...)
}
func (c *componentData) Infof(format string, args ...interface{}) {
c.InfoDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Warningf(format string, args ...interface{}) {
c.WarningDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Errorf(format string, args ...interface{}) {
c.ErrorDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Fatalf(format string, args ...interface{}) {
c.FatalDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Infoln(args ...interface{}) {
c.InfoDepth(1, args...)
}
func (c *componentData) Warningln(args ...interface{}) {
c.WarningDepth(1, args...)
}
func (c *componentData) Errorln(args ...interface{}) {
c.ErrorDepth(1, args...)
}
func (c *componentData) Fatalln(args ...interface{}) {
c.FatalDepth(1, args...)
}
func (c *componentData) V(l int) bool {
return V(l)
}
// Component creates a new component and returns it for logging. If a component
// with the name already exists, nothing will be created and it will be
// returned. SetLoggerV2 will panic if it is called with a logger created by
// Component.
func Component(componentName string) DepthLoggerV2 {
if cData, ok := cache[componentName]; ok {
return cData
}
c := &componentData{componentName}
cache[componentName] = c
return c
}
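
A short sketch of the intended Component usage; the component name "example" is hypothetical. Messages come out prefixed with the component name in square brackets.

package main

import "google.golang.org/grpc/grpclog"

// Component loggers are typically package-level variables.
var logger = grpclog.Component("example")

func main() {
	logger.Infof("starting with %d workers", 4)
	if logger.V(2) {
		logger.Info("verbose diagnostics enabled")
	}
}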

132
vendor/google.golang.org/grpc/grpclog/grpclog.go generated vendored Normal file
View File

@@ -0,0 +1,132 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package grpclog defines logging for grpc.
//
// All logs in transport and grpclb packages only go to verbose level 2.
// All logs in other packages in grpc are logged regardless of the verbosity level.
//
// In the default logger,
// the severity level can be set by the environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
// and the verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
package grpclog // import "google.golang.org/grpc/grpclog"
import (
"os"
"google.golang.org/grpc/internal/grpclog"
)
func init() {
SetLoggerV2(newLoggerV2())
}
// V reports whether verbosity level l is at least the requested verbose level.
func V(l int) bool {
return grpclog.Logger.V(l)
}
// Info logs to the INFO log.
func Info(args ...interface{}) {
grpclog.Logger.Info(args...)
}
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
func Infof(format string, args ...interface{}) {
grpclog.Logger.Infof(format, args...)
}
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
func Infoln(args ...interface{}) {
grpclog.Logger.Infoln(args...)
}
// Warning logs to the WARNING log.
func Warning(args ...interface{}) {
grpclog.Logger.Warning(args...)
}
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
func Warningf(format string, args ...interface{}) {
grpclog.Logger.Warningf(format, args...)
}
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
func Warningln(args ...interface{}) {
grpclog.Logger.Warningln(args...)
}
// Error logs to the ERROR log.
func Error(args ...interface{}) {
grpclog.Logger.Error(args...)
}
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
func Errorf(format string, args ...interface{}) {
grpclog.Logger.Errorf(format, args...)
}
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
func Errorln(args ...interface{}) {
grpclog.Logger.Errorln(args...)
}
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
// It calls os.Exit() with exit code 1.
func Fatal(args ...interface{}) {
grpclog.Logger.Fatal(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
// It calls os.Exit() with exit code 1.
func Fatalf(format string, args ...interface{}) {
grpclog.Logger.Fatalf(format, args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
// It calls os.Exit() with exit code 1.
func Fatalln(args ...interface{}) {
grpclog.Logger.Fatalln(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
//
// Deprecated: use Info.
func Print(args ...interface{}) {
grpclog.Logger.Info(args...)
}
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
//
// Deprecated: use Infof.
func Printf(format string, args ...interface{}) {
grpclog.Logger.Infof(format, args...)
}
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
//
// Deprecated: use Infoln.
func Println(args ...interface{}) {
grpclog.Logger.Infoln(args...)
}
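
A sketch of the package-level helpers above; with GRPC_GO_LOG_SEVERITY_LEVEL=info and GRPC_GO_LOG_VERBOSITY_LEVEL=2 set in the environment, both statements below would print to stderr.

package main

import "google.golang.org/grpc/grpclog"

func main() {
	grpclog.Infof("resolved %d addresses", 3)
	if grpclog.V(2) {
		grpclog.Infoln("details logged only at verbosity 2 and above")
	}
}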

87
vendor/google.golang.org/grpc/grpclog/logger.go generated vendored Normal file
View File

@@ -0,0 +1,87 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpclog
import "google.golang.org/grpc/internal/grpclog"
// Logger mimics golang's standard Logger as an interface.
//
// Deprecated: use LoggerV2.
type Logger interface {
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
Fatalln(args ...interface{})
Print(args ...interface{})
Printf(format string, args ...interface{})
Println(args ...interface{})
}
// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
//
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
grpclog.Logger = &loggerWrapper{Logger: l}
}
// loggerWrapper wraps Logger into a LoggerV2.
type loggerWrapper struct {
Logger
}
func (g *loggerWrapper) Info(args ...interface{}) {
g.Logger.Print(args...)
}
func (g *loggerWrapper) Infoln(args ...interface{}) {
g.Logger.Println(args...)
}
func (g *loggerWrapper) Infof(format string, args ...interface{}) {
g.Logger.Printf(format, args...)
}
func (g *loggerWrapper) Warning(args ...interface{}) {
g.Logger.Print(args...)
}
func (g *loggerWrapper) Warningln(args ...interface{}) {
g.Logger.Println(args...)
}
func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
g.Logger.Printf(format, args...)
}
func (g *loggerWrapper) Error(args ...interface{}) {
g.Logger.Print(args...)
}
func (g *loggerWrapper) Errorln(args ...interface{}) {
g.Logger.Println(args...)
}
func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
g.Logger.Printf(format, args...)
}
func (g *loggerWrapper) V(l int) bool {
// Returns true for all verbose levels.
return true
}
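
A sketch of installing the deprecated Logger interface: the standard library's *log.Logger already provides the required Fatal*/Print* methods, so it can be passed to SetLogger directly. New code should prefer SetLoggerV2.

package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// *log.Logger satisfies the deprecated grpclog.Logger interface.
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))
}

func main() {
	grpclog.Info("logging through the standard library logger")
}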

258
vendor/google.golang.org/grpc/grpclog/loggerv2.go generated vendored Normal file
View File

@@ -0,0 +1,258 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpclog
import (
"encoding/json"
"fmt"
"io"
"log"
"os"
"strconv"
"strings"
"google.golang.org/grpc/internal/grpclog"
)
// LoggerV2 does underlying logging work for grpclog.
type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...interface{})
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
Infoln(args ...interface{})
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
Infof(format string, args ...interface{})
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
Warning(args ...interface{})
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
Warningln(args ...interface{})
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
Warningf(format string, args ...interface{})
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
Error(args ...interface{})
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
Errorln(args ...interface{})
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
Errorf(format string, args ...interface{})
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatal(args ...interface{})
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalln(args ...interface{})
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalf(format string, args ...interface{})
// V reports whether verbosity level l is at least the requested verbose level.
V(l int) bool
}
// SetLoggerV2 sets the logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
func SetLoggerV2(l LoggerV2) {
if _, ok := l.(*componentData); ok {
panic("cannot use component logger as grpclog logger")
}
grpclog.Logger = l
grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
}
const (
// infoLog indicates Info severity.
infoLog int = iota
// warningLog indicates Warning severity.
warningLog
// errorLog indicates Error severity.
errorLog
// fatalLog indicates Fatal severity.
fatalLog
)
// severityName contains the string representation of each severity.
var severityName = []string{
infoLog: "INFO",
warningLog: "WARNING",
errorLog: "ERROR",
fatalLog: "FATAL",
}
// loggerT is the default logger used by grpclog.
type loggerT struct {
m []*log.Logger
v int
jsonFormat bool
}
// NewLoggerV2 creates a loggerV2 with the provided writers.
// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1).
// Error logs will be written to errorW, warningW and infoW.
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
}
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
}
type loggerV2Config struct {
verbose int
jsonFormat bool
}
func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
var m []*log.Logger
flag := log.LstdFlags
if c.jsonFormat {
flag = 0
}
m = append(m, log.New(infoW, "", flag))
m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
m = append(m, log.New(ew, "", flag))
m = append(m, log.New(ew, "", flag))
return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
}
// newLoggerV2 creates a loggerV2 to be used as default logger.
// All logs are written to stderr.
func newLoggerV2() LoggerV2 {
errorW := io.Discard
warningW := io.Discard
infoW := io.Discard
logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
switch logLevel {
case "", "ERROR", "error": // If env is unset, set level to ERROR.
errorW = os.Stderr
case "WARNING", "warning":
warningW = os.Stderr
case "INFO", "info":
infoW = os.Stderr
}
var v int
vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
if vl, err := strconv.Atoi(vLevel); err == nil {
v = vl
}
jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
verbose: v,
jsonFormat: jsonFormat,
})
}
func (g *loggerT) output(severity int, s string) {
sevStr := severityName[severity]
if !g.jsonFormat {
g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
return
}
// TODO: we can also include the logging component, but that needs more
// (API) changes.
b, _ := json.Marshal(map[string]string{
"severity": sevStr,
"message": s,
})
g.m[severity].Output(2, string(b))
}
func (g *loggerT) Info(args ...interface{}) {
g.output(infoLog, fmt.Sprint(args...))
}
func (g *loggerT) Infoln(args ...interface{}) {
g.output(infoLog, fmt.Sprintln(args...))
}
func (g *loggerT) Infof(format string, args ...interface{}) {
g.output(infoLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Warning(args ...interface{}) {
g.output(warningLog, fmt.Sprint(args...))
}
func (g *loggerT) Warningln(args ...interface{}) {
g.output(warningLog, fmt.Sprintln(args...))
}
func (g *loggerT) Warningf(format string, args ...interface{}) {
g.output(warningLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Error(args ...interface{}) {
g.output(errorLog, fmt.Sprint(args...))
}
func (g *loggerT) Errorln(args ...interface{}) {
g.output(errorLog, fmt.Sprintln(args...))
}
func (g *loggerT) Errorf(format string, args ...interface{}) {
g.output(errorLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Fatal(args ...interface{}) {
g.output(fatalLog, fmt.Sprint(args...))
os.Exit(1)
}
func (g *loggerT) Fatalln(args ...interface{}) {
g.output(fatalLog, fmt.Sprintln(args...))
os.Exit(1)
}
func (g *loggerT) Fatalf(format string, args ...interface{}) {
g.output(fatalLog, fmt.Sprintf(format, args...))
os.Exit(1)
}
func (g *loggerT) V(l int) bool {
return l <= g.v
}
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
LoggerV2
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...interface{})
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
WarningDepth(depth int, args ...interface{})
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
ErrorDepth(depth int, args ...interface{})
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...interface{})
}
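
A sketch of installing a custom LoggerV2; SetLoggerV2 is not mutex-protected, so it is called from init before any other gRPC activity. Here all severities go to stderr and the verbosity is set to 2.

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2))
}

func main() {
	grpclog.Warning("custom LoggerV2 installed")
}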

View File

@@ -0,0 +1,49 @@
/*
* Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"context"
)
// requestInfoKey is a struct to be used as the key to store RequestInfo in a
// context.
type requestInfoKey struct{}
// NewRequestInfoContext creates a context with ri.
func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context {
return context.WithValue(ctx, requestInfoKey{}, ri)
}
// RequestInfoFromContext extracts the RequestInfo from ctx.
func RequestInfoFromContext(ctx context.Context) interface{} {
return ctx.Value(requestInfoKey{})
}
// clientHandshakeInfoKey is a struct used as the key to store
// ClientHandshakeInfo in a context.
type clientHandshakeInfoKey struct{}
// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
func ClientHandshakeInfoFromContext(ctx context.Context) interface{} {
return ctx.Value(clientHandshakeInfoKey{})
}
// NewClientHandshakeInfoContext creates a context with chi.
func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context {
return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
}
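
A sketch of the context helpers above. The package lives under internal/, so this only compiles inside the grpc module itself; the stored value here is an arbitrary placeholder.

package main

import (
	"context"
	"fmt"

	icredentials "google.golang.org/grpc/internal/credentials"
)

func main() {
	// Store a request-info value and read it back; the concrete type is
	// opaque to this package (hence interface{}).
	ctx := icredentials.NewRequestInfoContext(context.Background(), "placeholder request info")
	fmt.Println(icredentials.RequestInfoFromContext(ctx))
}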

View File

@@ -0,0 +1,75 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package credentials defines APIs for parsing SPIFFE ID.
//
// All APIs in this package are experimental.
package credentials
import (
"crypto/tls"
"crypto/x509"
"net/url"
"google.golang.org/grpc/grpclog"
)
var logger = grpclog.Component("credentials")
// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
// is invalid, it returns nil and logs a warning.
func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
return nil
}
return SPIFFEIDFromCert(state.PeerCertificates[0])
}
// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE
// ID format is invalid, it returns nil and logs a warning.
func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
if cert == nil || cert.URIs == nil {
return nil
}
var spiffeID *url.URL
for _, uri := range cert.URIs {
if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
continue
}
// From this point, we assume the uri is intended for a SPIFFE ID.
if len(uri.String()) > 2048 {
logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
return nil
}
if len(uri.Host) == 0 || len(uri.Path) == 0 {
logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
return nil
}
if len(uri.Host) > 255 {
logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
return nil
}
// A valid SPIFFE certificate can only have exactly one URI SAN field.
if len(cert.URIs) > 1 {
logger.Warning("invalid SPIFFE ID: multiple URI SANs")
return nil
}
spiffeID = uri
}
return spiffeID
}
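
A sketch of SPIFFEIDFromCert on a certificate carrying a single spiffe:// URI SAN; only the URIs field matters to the parser, and the ID shown is hypothetical. As with the rest of internal/credentials, this compiles only inside the grpc module.

package main

import (
	"crypto/x509"
	"fmt"
	"net/url"

	icredentials "google.golang.org/grpc/internal/credentials"
)

func main() {
	u, err := url.Parse("spiffe://example.org/workload/api")
	if err != nil {
		panic(err)
	}
	cert := &x509.Certificate{URIs: []*url.URL{u}}
	if id := icredentials.SPIFFEIDFromCert(cert); id != nil {
		fmt.Println("SPIFFE ID:", id)
	}
}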

View File

@@ -0,0 +1,58 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"net"
"syscall"
)
type sysConn = syscall.Conn
// syscallConn keeps a reference to rawConn to support syscall.Conn for channelz.
// SyscallConn() (the method in interface syscall.Conn) is explicitly
// implemented on this type.
//
// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
// help here).
type syscallConn struct {
net.Conn
// sysConn is a type alias of syscall.Conn. It's necessary because the name
// `Conn` collides with `net.Conn`.
sysConn
}
// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
// implements syscall.Conn. rawConn will be used to support syscall, and newConn
// will be used for read/write.
//
// This function returns newConn if rawConn doesn't implement syscall.Conn.
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
sysConn, ok := rawConn.(syscall.Conn)
if !ok {
return newConn
}
return &syscallConn{
Conn: newConn,
sysConn: sysConn,
}
}
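
A sketch of the fallback behaviour of WrapSyscallConn: net.Pipe conns do not implement syscall.Conn, so the new conn is returned unchanged; with a *net.TCPConn as rawConn a wrapper exposing SyscallConn() would be returned instead. Internal package, so illustrative only.

package main

import (
	"fmt"
	"net"

	icredentials "google.golang.org/grpc/internal/credentials"
)

func main() {
	raw, other := net.Pipe()
	defer raw.Close()
	defer other.Close()
	wrapped := icredentials.WrapSyscallConn(raw, raw)
	fmt.Println(wrapped == raw) // true: pipe conns have no syscall.Conn support
}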

View File

@@ -0,0 +1,52 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"crypto/tls"
)
const alpnProtoStrH2 = "h2"
// AppendH2ToNextProtos appends h2 to next protos.
func AppendH2ToNextProtos(ps []string) []string {
for _, p := range ps {
if p == alpnProtoStrH2 {
return ps
}
}
ret := make([]string, 0, len(ps)+1)
ret = append(ret, ps...)
return append(ret, alpnProtoStrH2)
}
// CloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// If cfg is nil, a new zero tls.Config is returned.
//
// TODO: inline this function if possible.
func CloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}
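
A sketch of the two helpers above: clone the caller's config first so it is not mutated, then make sure "h2" is advertised via ALPN. Internal package, shown for illustration.

package main

import (
	"crypto/tls"
	"fmt"

	icredentials "google.golang.org/grpc/internal/credentials"
)

func main() {
	cfg := &tls.Config{NextProtos: []string{"http/1.1"}}
	clone := icredentials.CloneTLSConfig(cfg)
	clone.NextProtos = icredentials.AppendH2ToNextProtos(clone.NextProtos)
	fmt.Println(clone.NextProtos) // [http/1.1 h2]
	fmt.Println(cfg.NextProtos)   // [http/1.1] (original untouched)
}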

View File

@@ -0,0 +1,126 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package grpclog (internal) defines depth logging for grpc.
package grpclog
import (
"os"
)
// Logger is the logger used for the non-depth log functions.
var Logger LoggerV2
// DepthLogger is the logger used for the depth log functions.
var DepthLogger DepthLoggerV2
// InfoDepth logs to the INFO log at the specified depth.
func InfoDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.InfoDepth(depth, args...)
} else {
Logger.Infoln(args...)
}
}
// WarningDepth logs to the WARNING log at the specified depth.
func WarningDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.WarningDepth(depth, args...)
} else {
Logger.Warningln(args...)
}
}
// ErrorDepth logs to the ERROR log at the specified depth.
func ErrorDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.ErrorDepth(depth, args...)
} else {
Logger.Errorln(args...)
}
}
// FatalDepth logs to the FATAL log at the specified depth.
func FatalDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.FatalDepth(depth, args...)
} else {
Logger.Fatalln(args...)
}
os.Exit(1)
}
// LoggerV2 does underlying logging work for grpclog.
// This is a copy of the LoggerV2 defined in the external grpclog package. It
// is defined here to avoid a circular dependency.
type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...interface{})
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
Infoln(args ...interface{})
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
Infof(format string, args ...interface{})
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
Warning(args ...interface{})
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
Warningln(args ...interface{})
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
Warningf(format string, args ...interface{})
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
Error(args ...interface{})
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
Errorln(args ...interface{})
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
Errorf(format string, args ...interface{})
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatal(args ...interface{})
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalln(args ...interface{})
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalf(format string, args ...interface{})
// V reports whether verbosity level l is at least the requested verbose level.
V(l int) bool
}
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
// It is defined here to avoid a circular dependency.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...interface{})
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
WarningDepth(depth int, args ...interface{})
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
ErrorDepth(depth int, args ...interface{})
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...interface{})
}

View File

@@ -0,0 +1,93 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpclog
import (
"fmt"
)
// PrefixLogger does logging with a prefix.
//
// Logging methods on a nil *PrefixLogger log without any prefix.
type PrefixLogger struct {
logger DepthLoggerV2
prefix string
}
// Infof does info logging.
func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
if pl != nil {
// Handle nil, so the tests can pass in a nil logger.
format = pl.prefix + format
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
InfoDepth(1, fmt.Sprintf(format, args...))
}
// Warningf does warning logging.
func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
if pl != nil {
format = pl.prefix + format
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
return
}
WarningDepth(1, fmt.Sprintf(format, args...))
}
// Errorf does error logging.
func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
if pl != nil {
format = pl.prefix + format
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
return
}
ErrorDepth(1, fmt.Sprintf(format, args...))
}
// Debugf does info logging at verbose level 2.
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
// rewrite PrefixLogger a little to ensure that we don't use the global
// `Logger` here, and instead use the `logger` field.
if !Logger.V(2) {
return
}
if pl != nil {
// Handle nil, so the tests can pass in a nil logger.
format = pl.prefix + format
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
InfoDepth(1, fmt.Sprintf(format, args...))
}
// V reports whether verbosity level l is at least the requested verbose level.
func (pl *PrefixLogger) V(l int) bool {
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
// rewrite PrefixLogger a little to ensure that we don't use the global
// `Logger` here, and instead use the `logger` field.
return Logger.V(l)
}
// NewPrefixLogger creates a prefix logger with the given prefix.
func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
return &PrefixLogger{logger: logger, prefix: prefix}
}
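
A sketch of the usual pairing of PrefixLogger with a component logger; the component name and prefix are hypothetical, and the internal import only resolves inside the grpc module.

package main

import (
	"google.golang.org/grpc/grpclog"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

// Every message carries the "[example-lb] ..." prefix at the correct call depth.
var logger = internalgrpclog.NewPrefixLogger(grpclog.Component("example"), "[example-lb] ")

func main() {
	logger.Infof("picker updated with %d subconns", 3)
}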

170
vendor/google.golang.org/grpc/internal/internal.go generated vendored Normal file
View File

@@ -0,0 +1,170 @@
/*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package internal contains gRPC-internal code, to avoid polluting
// the godoc of the top-level grpc package. It must not import any grpc
// symbols to avoid circular dependencies.
package internal
import (
"context"
"time"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/serviceconfig"
)
var (
// WithHealthCheckFunc is set by dialoptions.go
WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
// HealthCheckFunc is used to provide client-side LB channel health checking
HealthCheckFunc HealthChecker
// BalancerUnregister is exported by package balancer to unregister a balancer.
BalancerUnregister func(name string)
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second
// ParseServiceConfig parses a JSON representation of the service config.
ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
// EqualServiceConfigForTesting is for testing service config generation and
// parsing. Both a and b should be returned by ParseServiceConfig.
// This function compares the config without rawJSON stripped, in case
// there's a difference in white space.
EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
// GetCertificateProviderBuilder returns the registered builder for the
// given name. This is set by package certprovider for use from xDS
// bootstrap code while parsing certificate provider configs in the
// bootstrap file.
GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
// stored in the passed in attributes. This is set by
// credentials/xds/xds.go.
GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
// GetServerCredentials returns the transport credentials configured on a
// gRPC server. An xDS-enabled server needs to know what type of credentials
// is configured on the underlying gRPC server. This is set by server.go.
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
// CanonicalString returns the canonical string of the code defined here:
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
CanonicalString interface{} // func (codes.Code) string
// DrainServerTransports initiates a graceful close of existing connections
// on a gRPC server accepted on the provided listener address. An
// xDS-enabled server invokes this method on a grpc.Server when a particular
// listener moves to "not-serving" mode.
DrainServerTransports interface{} // func(*grpc.Server, string)
// AddGlobalServerOptions adds an array of ServerOption that will be
// effective globally for newly created servers. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
AddGlobalServerOptions interface{} // func(opt ...ServerOption)
// ClearGlobalServerOptions clears the array of extra ServerOption. This
// method is useful in testing and benchmarking.
ClearGlobalServerOptions func()
// AddGlobalDialOptions adds an array of DialOption that will be effective
// globally for newly created client channels. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
AddGlobalDialOptions interface{} // func(opt ...DialOption)
// DisableGlobalDialOptions returns a DialOption that prevents the
// ClientConn from applying the global DialOptions (set via
// AddGlobalDialOptions).
DisableGlobalDialOptions interface{} // func() grpc.DialOption
// ClearGlobalDialOptions clears the array of extra DialOption. This
// method is useful in testing and benchmarking.
ClearGlobalDialOptions func()
// JoinDialOptions combines the dial options passed as arguments into a
// single dial option.
JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption
// JoinServerOptions combines the server options passed as arguments into a
// single server option.
JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption
// WithBinaryLogger returns a DialOption that specifies the binary logger
// for a ClientConn.
WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption
// BinaryLogger returns a ServerOption that can set the binary logger for a
// server.
BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
// the provided xds bootstrap config instead of the global configuration from
// the supported environment variables. The resolver.Builder is meant to be
// used in conjunction with the grpc.WithResolvers DialOption.
//
// Testing Only
//
// This function should ONLY be used for testing and may not work with some
// other features, including the CSDS service.
NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error)
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
// variable.
//
// TODO: Remove this function once the RLS env var is removed.
RegisterRLSClusterSpecifierPluginForTesting func()
// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
// Specifier Plugin for testing purposes. This is needed because there is no way
// to unregister the RLS Cluster Specifier Plugin after registering it solely
// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
//
// TODO: Remove this function once the RLS env var is removed.
UnregisterRLSClusterSpecifierPluginForTesting func()
// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
// purposes, regardless of the RBAC environment variable.
//
// TODO: Remove this function once the RBAC env var is removed.
RegisterRBACHTTPFilterForTesting func()
// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
// testing purposes. This is needed because there is no way to unregister the
// HTTP Filter after registering it solely for testing purposes using
// RegisterRBACHTTPFilterForTesting().
//
// TODO: Remove this function once the RBAC env var is removed.
UnregisterRBACHTTPFilterForTesting func()
// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions)
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
//
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
// its health back by calling setConnectivityState().
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
const (
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
CredsBundleModeFallback = "fallback"
// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
// mode.
CredsBundleModeBalancer = "balancer"
// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
// that supports backend returned by grpclb balancer.
CredsBundleModeBackendFromBalancer = "backend-from-balancer"
)
// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
//
// It currently has an experimental suffix which would be removed once
// end-to-end testing of the policy is completed.
const RLSLoadBalancingPolicyName = "rls_experimental"
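
A sketch of a function matching the HealthChecker signature above; a real checker would open the health RPC stream via newStream and watch serviceName, while this stub just reports READY. The internal package is only importable from within the grpc module.

package main

import (
	"context"

	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal"
)

func noopHealthCheck(ctx context.Context, newStream func(string) (interface{}, error),
	setConnectivityState func(connectivity.State, error), serviceName string) error {
	// Report READY immediately instead of running the health-checking protocol.
	setConnectivityState(connectivity.Ready, nil)
	return nil
}

func main() {
	var hc internal.HealthChecker = noopHealthCheck
	_ = hc
}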

View File

@@ -0,0 +1,82 @@
/*
*
* Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package pretty defines helper functions to pretty-print structs for logging.
package pretty
import (
"bytes"
"encoding/json"
"fmt"
"github.com/golang/protobuf/jsonpb"
protov1 "github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
protov2 "google.golang.org/protobuf/proto"
)
const jsonIndent = " "
// ToJSON marshals the input into a json string.
//
// If marshaling fails, it falls back to fmt.Sprintf("%+v").
func ToJSON(e interface{}) string {
switch ee := e.(type) {
case protov1.Message:
mm := jsonpb.Marshaler{Indent: jsonIndent}
ret, err := mm.MarshalToString(ee)
if err != nil {
// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
// messages are not imported, and this will fail because the message
// is not found.
return fmt.Sprintf("%+v", ee)
}
return ret
case protov2.Message:
mm := protojson.MarshalOptions{
Multiline: true,
Indent: jsonIndent,
}
ret, err := mm.Marshal(ee)
if err != nil {
// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
// messages are not imported, and this will fail because the message
// is not found.
return fmt.Sprintf("%+v", ee)
}
return string(ret)
default:
ret, err := json.MarshalIndent(ee, "", jsonIndent)
if err != nil {
return fmt.Sprintf("%+v", ee)
}
return string(ret)
}
}
// FormatJSON formats the input json bytes with indentation.
//
// If Indent fails, it returns the unchanged input as a string.
func FormatJSON(b []byte) string {
var out bytes.Buffer
err := json.Indent(&out, b, "", jsonIndent)
if err != nil {
return string(b)
}
return out.String()
}
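
A sketch of the two helpers above on plain (non-proto) inputs; proto messages would take the jsonpb/protojson branches instead.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/pretty"
)

func main() {
	// Non-proto values fall through to encoding/json with the shared indent.
	fmt.Println(pretty.ToJSON(map[string]int{"retries": 3}))
	// Re-indent raw JSON bytes; invalid JSON would be returned unchanged.
	fmt.Println(pretty.FormatJSON([]byte(`{"a":1,"b":2}`)))
}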

View File

@@ -0,0 +1,40 @@
/*
* Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package internal
import (
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/resolver"
)
// handshakeClusterNameKey is the type used as the key to store cluster name in
// the Attributes field of resolver.Address.
type handshakeClusterNameKey struct{}
// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
// is updated with the cluster name.
func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
return addr
}
// GetXDSHandshakeClusterName returns cluster name stored in attr.
func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
v := attr.Value(handshakeClusterNameKey{})
name, ok := v.(string)
return name, ok
}
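
A sketch of the pair of helpers above; the address and cluster name are hypothetical, and the internal import only resolves inside the grpc module.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/resolver"
)

func main() {
	addr := resolver.Address{Addr: "10.0.0.1:443"}
	addr = internal.SetXDSHandshakeClusterName(addr, "example-cluster")
	if name, ok := internal.GetXDSHandshakeClusterName(addr.Attributes); ok {
		fmt.Println("cluster:", name)
	}
}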

138
vendor/google.golang.org/grpc/resolver/map.go generated vendored Normal file
View File

@@ -0,0 +1,138 @@
/*
*
* Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package resolver
type addressMapEntry struct {
addr Address
value interface{}
}
// AddressMap is a map of addresses to arbitrary values taking into account
// Attributes. BalancerAttributes are ignored, as are Metadata and Type.
// Multiple accesses may not be performed concurrently. Must be created via
// NewAddressMap; do not construct directly.
type AddressMap struct {
// The underlying map is keyed by an Address with fields that we don't care
// about being set to their zero values. The only fields that we care about
// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
// distinguish between addresses with same `Addr` and `ServerName`, but
// different `Attributes`, we cannot store the `Attributes` in the map key.
//
// The comparison operation for structs works as follows:
// Struct values are comparable if all their fields are comparable. Two
// struct values are equal if their corresponding non-blank fields are equal.
//
// The value type of the map contains a slice of addresses which match the key
// in their `Addr` and `ServerName` fields and contain the corresponding value
// associated with them.
m map[Address]addressMapEntryList
}
func toMapKey(addr *Address) Address {
return Address{Addr: addr.Addr, ServerName: addr.ServerName}
}
type addressMapEntryList []*addressMapEntry
// NewAddressMap creates a new AddressMap.
func NewAddressMap() *AddressMap {
return &AddressMap{m: make(map[Address]addressMapEntryList)}
}
// find returns the index of addr in the addressMapEntry slice, or -1 if not
// present.
func (l addressMapEntryList) find(addr Address) int {
for i, entry := range l {
// Attributes are the only thing to match on here, since `Addr` and
// `ServerName` are already equal.
if entry.addr.Attributes.Equal(addr.Attributes) {
return i
}
}
return -1
}
// Get returns the value for the address in the map, if present.
func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
addrKey := toMapKey(&addr)
entryList := a.m[addrKey]
if entry := entryList.find(addr); entry != -1 {
return entryList[entry].value, true
}
return nil, false
}
// Set updates or adds the value to the address in the map.
func (a *AddressMap) Set(addr Address, value interface{}) {
addrKey := toMapKey(&addr)
entryList := a.m[addrKey]
if entry := entryList.find(addr); entry != -1 {
entryList[entry].value = value
return
}
a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value})
}
// Delete removes addr from the map.
func (a *AddressMap) Delete(addr Address) {
addrKey := toMapKey(&addr)
entryList := a.m[addrKey]
entry := entryList.find(addr)
if entry == -1 {
return
}
if len(entryList) == 1 {
entryList = nil
} else {
copy(entryList[entry:], entryList[entry+1:])
entryList = entryList[:len(entryList)-1]
}
a.m[addrKey] = entryList
}
// Len returns the number of entries in the map.
func (a *AddressMap) Len() int {
ret := 0
for _, entryList := range a.m {
ret += len(entryList)
}
return ret
}
// Keys returns a slice of all current map keys.
func (a *AddressMap) Keys() []Address {
ret := make([]Address, 0, a.Len())
for _, entryList := range a.m {
for _, entry := range entryList {
ret = append(ret, entry.addr)
}
}
return ret
}
// Values returns a slice of all current map values.
func (a *AddressMap) Values() []interface{} {
ret := make([]interface{}, 0, a.Len())
for _, entryList := range a.m {
for _, entry := range entryList {
ret = append(ret, entry.value)
}
}
return ret
}
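
A sketch of AddressMap semantics: two addresses sharing Addr and ServerName but differing in Attributes are kept as separate entries. The attribute key/values are hypothetical.

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	m := resolver.NewAddressMap()
	a1 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "a")}
	a2 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "b")}
	m.Set(a1, "value-a")
	m.Set(a2, "value-b")
	if v, ok := m.Get(a1); ok {
		fmt.Println(v) // value-a
	}
	fmt.Println(m.Len()) // 2: same Addr, different Attributes
}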

320
vendor/google.golang.org/grpc/resolver/resolver.go generated vendored Normal file
View File

@@ -0,0 +1,320 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package resolver defines APIs for name resolution in gRPC.
// All APIs in this package are experimental.
package resolver
import (
"context"
"net"
"net/url"
"strings"
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/serviceconfig"
)
var (
// m is a map from scheme to resolver builder.
m = make(map[string]Builder)
// defaultScheme is the default scheme to use.
defaultScheme = "passthrough"
)
// TODO(bar) install dns resolver in init(){}.
// Register registers the resolver builder to the resolver map. b.Scheme will
// be used as the scheme registered with this builder. The registry is case
// sensitive, and schemes should not contain any uppercase characters.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Resolvers are
// registered with the same name, the one registered last will take effect.
func Register(b Builder) {
m[b.Scheme()] = b
}
// Get returns the resolver builder registered with the given scheme.
//
// If no builder is registered with the scheme, nil will be returned.
func Get(scheme string) Builder {
if b, ok := m[scheme]; ok {
return b
}
return nil
}
// SetDefaultScheme sets the default scheme that will be used. The default
// scheme is "passthrough".
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. The scheme set last overrides
// previously set values.
func SetDefaultScheme(scheme string) {
defaultScheme = scheme
}
// GetDefaultScheme gets the default scheme that will be used.
func GetDefaultScheme() string {
return defaultScheme
}
// AddressType indicates the address type returned by name resolution.
//
// Deprecated: use Attributes in Address instead.
type AddressType uint8
const (
// Backend indicates the address is for a backend server.
//
// Deprecated: use Attributes in Address instead.
Backend AddressType = iota
// GRPCLB indicates the address is for a grpclb load balancer.
//
// Deprecated: to select the GRPCLB load balancing policy, use a service
// config with a corresponding loadBalancingConfig. To supply balancer
// addresses to the GRPCLB load balancing policy, set State.Attributes
// using balancer/grpclb/state.Set.
GRPCLB
)
// Address represents a server the client connects to.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type Address struct {
// Addr is the server address on which a connection will be established.
Addr string
// ServerName is the name of this address.
// If non-empty, the ServerName is used as the transport certification authority for
// the address, instead of the hostname from the Dial target string. In most cases,
// this should not be set.
//
// If Type is GRPCLB, ServerName should be the name of the remote load
// balancer, not the name of the backend.
//
// WARNING: ServerName must only be populated with trusted values. It
// is insecure to populate it with data from untrusted inputs since untrusted
// values could be used to bypass the authority checks performed by TLS.
ServerName string
// Attributes contains arbitrary data about this address intended for
// consumption by the SubConn.
Attributes *attributes.Attributes
// BalancerAttributes contains arbitrary data about this address intended
// for consumption by the LB policy. These attributes do not affect SubConn
// creation, connection establishment, handshaking, etc.
BalancerAttributes *attributes.Attributes
// Type is the type of this address.
//
// Deprecated: use Attributes instead.
Type AddressType
// Metadata is the information associated with Addr, which may be used
// to make load balancing decisions.
//
// Deprecated: use Attributes instead.
Metadata interface{}
}
// Equal returns whether a and o are identical. Metadata is compared directly,
// not with any recursive introspection.
func (a Address) Equal(o Address) bool {
return a.Addr == o.Addr && a.ServerName == o.ServerName &&
a.Attributes.Equal(o.Attributes) &&
a.BalancerAttributes.Equal(o.BalancerAttributes) &&
a.Type == o.Type && a.Metadata == o.Metadata
}
// String returns a JSON-formatted string representation of the address.
func (a Address) String() string {
return pretty.ToJSON(a)
}
// BuildOptions includes additional information for the builder to create
// the resolver.
type BuildOptions struct {
// DisableServiceConfig indicates whether a resolver implementation should
// fetch service config data.
DisableServiceConfig bool
// DialCreds is the transport credentials used by the ClientConn for
// communicating with the target gRPC service (set via
// WithTransportCredentials). In cases where a name resolution service
// requires the same credentials, the resolver may use this field. In most
// cases though, it is not appropriate, and this field may be ignored.
DialCreds credentials.TransportCredentials
// CredsBundle is the credentials bundle used by the ClientConn for
// communicating with the target gRPC service (set via
// WithCredentialsBundle). In cases where a name resolution service
// requires the same credentials, the resolver may use this field. In most
// cases though, it is not appropriate, and this field may be ignored.
CredsBundle credentials.Bundle
// Dialer is the custom dialer used by the ClientConn for dialling the
// target gRPC service (set via WithDialer). In cases where a name
// resolution service requires the same dialer, the resolver may use this
// field. In most cases though, it is not appropriate, and this field may
// be ignored.
Dialer func(context.Context, string) (net.Conn, error)
}
// State contains the current Resolver state relevant to the ClientConn.
type State struct {
// Addresses is the latest set of resolved addresses for the target.
Addresses []Address
// ServiceConfig contains the result from parsing the latest service
// config. If it is nil, it indicates no service config is present or the
// resolver does not provide service configs.
ServiceConfig *serviceconfig.ParseResult
// Attributes contains arbitrary data about the resolver intended for
// consumption by the load balancing policy.
Attributes *attributes.Attributes
}
// ClientConn contains the callbacks for resolver to notify any updates
// to the gRPC ClientConn.
//
// This interface is to be implemented by gRPC. Users should not need a
// brand new implementation of this interface. For situations like testing,
// the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface.
type ClientConn interface {
// UpdateState updates the state of the ClientConn appropriately.
//
// If an error is returned, the resolver should try to resolve the
// target again. The resolver should use a backoff timer to prevent
// overloading the server with requests. If a resolver is certain that
// reresolving will not change the result, e.g. because it is
// a watch-based resolver, returned errors can be ignored.
//
// If the resolved State is the same as the last reported one, calling
// UpdateState can be omitted.
UpdateState(State) error
// ReportError notifies the ClientConn that the Resolver encountered an
// error. The ClientConn will notify the load balancer and begin calling
// ResolveNow on the Resolver with exponential backoff.
ReportError(error)
// NewAddress is called by the resolver to notify the ClientConn of a new
// list of resolved addresses.
// The address list should be the complete list of resolved addresses.
//
// Deprecated: Use UpdateState instead.
NewAddress(addresses []Address)
// NewServiceConfig is called by the resolver to notify the ClientConn of a
// new service config. The service config should be provided as a JSON string.
//
// Deprecated: Use UpdateState instead.
NewServiceConfig(serviceConfig string)
// ParseServiceConfig parses the provided service config and returns an
// object that provides the parsed config.
ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
}
// Target represents a target for gRPC, as specified in:
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
// It is parsed from the target string that gets passed into Dial or DialContext
// by the user; gRPC then passes it to the resolver and the balancer.
//
// If the target follows the naming spec, and the parsed scheme is registered
// with gRPC, we will parse the target string according to the spec. If the
// target does not contain a scheme or if the parsed scheme is not registered
// (i.e. no corresponding resolver available to resolve the endpoint), we will
// apply the default scheme, and will attempt to reparse it.
//
// Examples:
//
// - "dns://some_authority/foo.bar"
// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
// - "foo.bar"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
// - "unknown_scheme://authority/endpoint"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
type Target struct {
// Deprecated: use URL.Scheme instead.
Scheme string
// Deprecated: use URL.Host instead.
Authority string
// URL contains the parsed dial target with an optional default scheme added
// to it if the original dial target contained no scheme or contained an
// unregistered scheme. Any query params specified in the original dial
// target can be accessed from here.
URL url.URL
}
// Endpoint retrieves the endpoint without the leading "/" from either `URL.Path`
// or `URL.Opaque`. The latter is used when the former is empty.
func (t Target) Endpoint() string {
endpoint := t.URL.Path
if endpoint == "" {
endpoint = t.URL.Opaque
}
// For targets of the form "[scheme]://[authority]/endpoint", the endpoint
// value returned from url.Parse() contains a leading "/". Although this is
// in accordance with RFC 3986, we do not want to break existing resolver
// implementations which expect the endpoint without the leading "/". So, we
// end up stripping the leading "/" here. But this will result in an
// incorrect parsing for something like "unix:///path/to/socket". Since we
// own the "unix" resolver, we can workaround in the unix resolver by using
// the `URL` field.
return strings.TrimPrefix(endpoint, "/")
}
// Builder creates a resolver that will be used to watch name resolution updates.
type Builder interface {
// Build creates a new resolver for the given target.
//
// gRPC dial calls Build synchronously, and fails if the returned error is
// not nil.
Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
// Scheme returns the scheme supported by this resolver. Scheme is defined
// at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned
// string should not contain uppercase characters, as they will not match
// the parsed target's scheme as defined in RFC 3986.
Scheme() string
}
// ResolveNowOptions includes additional information for ResolveNow.
type ResolveNowOptions struct{}
// Resolver watches for the updates on the specified target.
// Updates include address updates and service config updates.
type Resolver interface {
// ResolveNow will be called by gRPC to try to resolve the target name
// again. It's just a hint; the resolver can ignore it if that's not necessary.
//
// It could be called multiple times concurrently.
ResolveNow(ResolveNowOptions)
// Close closes the resolver.
Close()
}
// UnregisterForTesting removes the resolver builder with the given scheme from the
// resolver map.
// This function is for testing only.
func UnregisterForTesting(scheme string) {
delete(m, scheme)
}
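
To make the APIs above concrete, here is a minimal sketch (not part of the vendored file) of a hypothetical static resolver that implements Builder and Resolver and registers itself for an illustrative "example" scheme; the scheme name and backend address are assumptions for illustration only.

package exampleresolver

import "google.golang.org/grpc/resolver"

// exampleBuilder builds resolvers for the hypothetical "example" scheme.
type exampleBuilder struct{}

func (exampleBuilder) Scheme() string { return "example" }

func (exampleBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
    r := &staticResolver{cc: cc}
    r.ResolveNow(resolver.ResolveNowOptions{}) // push an initial address list
    return r, nil
}

// staticResolver always reports the same fixed backend address.
type staticResolver struct{ cc resolver.ClientConn }

func (r *staticResolver) ResolveNow(resolver.ResolveNowOptions) {
    // The error from UpdateState is ignored here; a real resolver should
    // back off and re-resolve as described in the ClientConn docs above.
    r.cc.UpdateState(resolver.State{
        Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}},
    })
}

func (r *staticResolver) Close() {}

func init() {
    // Register must only be called at initialization time, per the note above.
    resolver.Register(exampleBuilder{})
}

A client could then dial a target such as "example:///anything" and the connection would be routed to the fixed address through this builder.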

View File

@@ -0,0 +1,44 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package serviceconfig defines types and methods for operating on gRPC
// service configs.
//
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
package serviceconfig
// Config represents an opaque data structure holding a service config.
type Config interface {
isServiceConfig()
}
// LoadBalancingConfig represents an opaque data structure holding a load
// balancing config.
type LoadBalancingConfig interface {
isLoadBalancingConfig()
}
// ParseResult contains a service config or an error. Exactly one must be
// non-nil.
type ParseResult struct {
Config Config
Err error
}

View File

@@ -0,0 +1,665 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package protojson
import (
"encoding/base64"
"fmt"
"math"
"strconv"
"strings"
"google.golang.org/protobuf/internal/encoding/json"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/set"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
// Unmarshal reads the given []byte into the given proto.Message.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func Unmarshal(b []byte, m proto.Message) error {
return UnmarshalOptions{}.Unmarshal(b, m)
}
// UnmarshalOptions is a configurable JSON format parser.
type UnmarshalOptions struct {
pragma.NoUnkeyedLiterals
// If AllowPartial is set, input for messages that will result in missing
// required fields will not return an error.
AllowPartial bool
// If DiscardUnknown is set, unknown fields are ignored.
DiscardUnknown bool
// Resolver is used for looking up types when unmarshaling
// google.protobuf.Any messages or extension fields.
// If nil, this defaults to using protoregistry.GlobalTypes.
Resolver interface {
protoregistry.MessageTypeResolver
protoregistry.ExtensionTypeResolver
}
}
// Unmarshal reads the given []byte and populates the given proto.Message
// using options in the UnmarshalOptions object.
// It will clear the message first before setting the fields.
// If it returns an error, the given message may be partially set.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
return o.unmarshal(b, m)
}
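
As a usage sketch of these options (assuming the well-known emptypb helper package from the protobuf module is available), the DiscardUnknown flag changes how an unknown JSON field is treated:

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protojson"
    "google.golang.org/protobuf/types/known/emptypb"
)

func main() {
    in := []byte(`{"unexpected": 1}`)

    // With default options, the unknown field is an error.
    fmt.Println(protojson.Unmarshal(in, &emptypb.Empty{}) != nil) // true

    // With DiscardUnknown, the unknown field is skipped.
    fmt.Println(protojson.UnmarshalOptions{DiscardUnknown: true}.Unmarshal(in, &emptypb.Empty{})) // <nil>
}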
// unmarshal is a centralized function that all unmarshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for unmarshal that do not go through this.
func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
proto.Reset(m)
if o.Resolver == nil {
o.Resolver = protoregistry.GlobalTypes
}
dec := decoder{json.NewDecoder(b), o}
if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
return err
}
// Check for EOF.
tok, err := dec.Read()
if err != nil {
return err
}
if tok.Kind() != json.EOF {
return dec.unexpectedTokenError(tok)
}
if o.AllowPartial {
return nil
}
return proto.CheckInitialized(m)
}
type decoder struct {
*json.Decoder
opts UnmarshalOptions
}
// newError returns an error object with position info.
func (d decoder) newError(pos int, f string, x ...interface{}) error {
line, column := d.Position(pos)
head := fmt.Sprintf("(line %d:%d): ", line, column)
return errors.New(head+f, x...)
}
// unexpectedTokenError returns a syntax error for the given unexpected token.
func (d decoder) unexpectedTokenError(tok json.Token) error {
return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString())
}
// syntaxError returns a syntax error for given position.
func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
line, column := d.Position(pos)
head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
return errors.New(head+f, x...)
}
// unmarshalMessage unmarshals a message into the given protoreflect.Message.
func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
return unmarshal(d, m)
}
tok, err := d.Read()
if err != nil {
return err
}
if tok.Kind() != json.ObjectOpen {
return d.unexpectedTokenError(tok)
}
messageDesc := m.Descriptor()
if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
return errors.New("no support for proto1 MessageSets")
}
var seenNums set.Ints
var seenOneofs set.Ints
fieldDescs := messageDesc.Fields()
for {
// Read field name.
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
default:
return d.unexpectedTokenError(tok)
case json.ObjectClose:
return nil
case json.Name:
// Continue below.
}
name := tok.Name()
// When unmarshaling a non-custom embedded message in Any, the JSON object
// contains a field "@type" which should be skipped because it is not a field
// of the embedded message, but simply an artifact of the Any format.
if skipTypeURL && name == "@type" {
d.Read()
continue
}
// Get the FieldDescriptor.
var fd protoreflect.FieldDescriptor
if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") {
// Only extension names are in [name] format.
extName := protoreflect.FullName(name[1 : len(name)-1])
extType, err := d.opts.Resolver.FindExtensionByName(extName)
if err != nil && err != protoregistry.NotFound {
return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err)
}
if extType != nil {
fd = extType.TypeDescriptor()
if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() {
return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName())
}
}
} else {
// The name can either be the JSON name or the proto field name.
fd = fieldDescs.ByJSONName(name)
if fd == nil {
fd = fieldDescs.ByTextName(name)
}
}
if flags.ProtoLegacy {
if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
fd = nil // reset since the weak reference is not linked in
}
}
if fd == nil {
// Field is unknown.
if d.opts.DiscardUnknown {
if err := d.skipJSONValue(); err != nil {
return err
}
continue
}
return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
}
// Do not allow duplicate fields.
num := uint64(fd.Number())
if seenNums.Has(num) {
return d.newError(tok.Pos(), "duplicate field %v", tok.RawString())
}
seenNums.Set(num)
// No need to set values for JSON null unless the field type is
// google.protobuf.Value or google.protobuf.NullValue.
if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) {
d.Read()
continue
}
switch {
case fd.IsList():
list := m.Mutable(fd).List()
if err := d.unmarshalList(list, fd); err != nil {
return err
}
case fd.IsMap():
mmap := m.Mutable(fd).Map()
if err := d.unmarshalMap(mmap, fd); err != nil {
return err
}
default:
// If field is a oneof, check if it has already been set.
if od := fd.ContainingOneof(); od != nil {
idx := uint64(od.Index())
if seenOneofs.Has(idx) {
return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName())
}
seenOneofs.Set(idx)
}
// Required or optional fields.
if err := d.unmarshalSingular(m, fd); err != nil {
return err
}
}
}
}
func isKnownValue(fd protoreflect.FieldDescriptor) bool {
md := fd.Message()
return md != nil && md.FullName() == genid.Value_message_fullname
}
func isNullValue(fd protoreflect.FieldDescriptor) bool {
ed := fd.Enum()
return ed != nil && ed.FullName() == genid.NullValue_enum_fullname
}
// unmarshalSingular unmarshals to the non-repeated field specified
// by the given FieldDescriptor.
func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error {
var val protoreflect.Value
var err error
switch fd.Kind() {
case protoreflect.MessageKind, protoreflect.GroupKind:
val = m.NewField(fd)
err = d.unmarshalMessage(val.Message(), false)
default:
val, err = d.unmarshalScalar(fd)
}
if err != nil {
return err
}
m.Set(fd, val)
return nil
}
// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by
// the given FieldDescriptor.
func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
const b32 int = 32
const b64 int = 64
tok, err := d.Read()
if err != nil {
return protoreflect.Value{}, err
}
kind := fd.Kind()
switch kind {
case protoreflect.BoolKind:
if tok.Kind() == json.Bool {
return protoreflect.ValueOfBool(tok.Bool()), nil
}
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if v, ok := unmarshalInt(tok, b32); ok {
return v, nil
}
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if v, ok := unmarshalInt(tok, b64); ok {
return v, nil
}
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if v, ok := unmarshalUint(tok, b32); ok {
return v, nil
}
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if v, ok := unmarshalUint(tok, b64); ok {
return v, nil
}
case protoreflect.FloatKind:
if v, ok := unmarshalFloat(tok, b32); ok {
return v, nil
}
case protoreflect.DoubleKind:
if v, ok := unmarshalFloat(tok, b64); ok {
return v, nil
}
case protoreflect.StringKind:
if tok.Kind() == json.String {
return protoreflect.ValueOfString(tok.ParsedString()), nil
}
case protoreflect.BytesKind:
if v, ok := unmarshalBytes(tok); ok {
return v, nil
}
case protoreflect.EnumKind:
if v, ok := unmarshalEnum(tok, fd); ok {
return v, nil
}
default:
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
}
return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
}
func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.Number:
return getInt(tok, bitSize)
case json.String:
// Decode number from string.
s := strings.TrimSpace(tok.ParsedString())
if len(s) != len(tok.ParsedString()) {
return protoreflect.Value{}, false
}
dec := json.NewDecoder([]byte(s))
tok, err := dec.Read()
if err != nil {
return protoreflect.Value{}, false
}
return getInt(tok, bitSize)
}
return protoreflect.Value{}, false
}
func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
n, ok := tok.Int(bitSize)
if !ok {
return protoreflect.Value{}, false
}
if bitSize == 32 {
return protoreflect.ValueOfInt32(int32(n)), true
}
return protoreflect.ValueOfInt64(n), true
}
func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.Number:
return getUint(tok, bitSize)
case json.String:
// Decode number from string.
s := strings.TrimSpace(tok.ParsedString())
if len(s) != len(tok.ParsedString()) {
return protoreflect.Value{}, false
}
dec := json.NewDecoder([]byte(s))
tok, err := dec.Read()
if err != nil {
return protoreflect.Value{}, false
}
return getUint(tok, bitSize)
}
return protoreflect.Value{}, false
}
func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
n, ok := tok.Uint(bitSize)
if !ok {
return protoreflect.Value{}, false
}
if bitSize == 32 {
return protoreflect.ValueOfUint32(uint32(n)), true
}
return protoreflect.ValueOfUint64(n), true
}
func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.Number:
return getFloat(tok, bitSize)
case json.String:
s := tok.ParsedString()
switch s {
case "NaN":
if bitSize == 32 {
return protoreflect.ValueOfFloat32(float32(math.NaN())), true
}
return protoreflect.ValueOfFloat64(math.NaN()), true
case "Infinity":
if bitSize == 32 {
return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true
}
return protoreflect.ValueOfFloat64(math.Inf(+1)), true
case "-Infinity":
if bitSize == 32 {
return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true
}
return protoreflect.ValueOfFloat64(math.Inf(-1)), true
}
// Decode number from string.
if len(s) != len(strings.TrimSpace(s)) {
return protoreflect.Value{}, false
}
dec := json.NewDecoder([]byte(s))
tok, err := dec.Read()
if err != nil {
return protoreflect.Value{}, false
}
return getFloat(tok, bitSize)
}
return protoreflect.Value{}, false
}
func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
n, ok := tok.Float(bitSize)
if !ok {
return protoreflect.Value{}, false
}
if bitSize == 32 {
return protoreflect.ValueOfFloat32(float32(n)), true
}
return protoreflect.ValueOfFloat64(n), true
}
func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) {
if tok.Kind() != json.String {
return protoreflect.Value{}, false
}
s := tok.ParsedString()
enc := base64.StdEncoding
if strings.ContainsAny(s, "-_") {
enc = base64.URLEncoding
}
if len(s)%4 != 0 {
enc = enc.WithPadding(base64.NoPadding)
}
b, err := enc.DecodeString(s)
if err != nil {
return protoreflect.Value{}, false
}
return protoreflect.ValueOfBytes(b), true
}
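
The effect of this tolerant base64 handling can be observed by unmarshaling a bytes wrapper; a brief sketch assuming the wrapperspb helper package:

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protojson"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    var bv wrapperspb.BytesValue

    // Unpadded standard base64 ("hello" without the trailing '=') is accepted.
    if err := protojson.Unmarshal([]byte(`"aGVsbG8"`), &bv); err != nil {
        panic(err)
    }
    fmt.Println(string(bv.Value)) // hello

    // '-' or '_' in the input switches decoding to the URL-safe alphabet.
    if err := protojson.Unmarshal([]byte(`"-_-_"`), &bv); err != nil {
        panic(err)
    }
    fmt.Printf("%x\n", bv.Value) // fbffbf
}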
func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.String:
// Lookup EnumNumber based on name.
s := tok.ParsedString()
if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
return protoreflect.ValueOfEnum(enumVal.Number()), true
}
case json.Number:
if n, ok := tok.Int(32); ok {
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true
}
case json.Null:
// This is only valid for google.protobuf.NullValue.
if isNullValue(fd) {
return protoreflect.ValueOfEnum(0), true
}
}
return protoreflect.Value{}, false
}
func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
tok, err := d.Read()
if err != nil {
return err
}
if tok.Kind() != json.ArrayOpen {
return d.unexpectedTokenError(tok)
}
switch fd.Kind() {
case protoreflect.MessageKind, protoreflect.GroupKind:
for {
tok, err := d.Peek()
if err != nil {
return err
}
if tok.Kind() == json.ArrayClose {
d.Read()
return nil
}
val := list.NewElement()
if err := d.unmarshalMessage(val.Message(), false); err != nil {
return err
}
list.Append(val)
}
default:
for {
tok, err := d.Peek()
if err != nil {
return err
}
if tok.Kind() == json.ArrayClose {
d.Read()
return nil
}
val, err := d.unmarshalScalar(fd)
if err != nil {
return err
}
list.Append(val)
}
}
return nil
}
func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
tok, err := d.Read()
if err != nil {
return err
}
if tok.Kind() != json.ObjectOpen {
return d.unexpectedTokenError(tok)
}
// Determine ahead whether map entry is a scalar type or a message type in
// order to call the appropriate unmarshalMapValue func inside the for loop
// below.
var unmarshalMapValue func() (protoreflect.Value, error)
switch fd.MapValue().Kind() {
case protoreflect.MessageKind, protoreflect.GroupKind:
unmarshalMapValue = func() (protoreflect.Value, error) {
val := mmap.NewValue()
if err := d.unmarshalMessage(val.Message(), false); err != nil {
return protoreflect.Value{}, err
}
return val, nil
}
default:
unmarshalMapValue = func() (protoreflect.Value, error) {
return d.unmarshalScalar(fd.MapValue())
}
}
Loop:
for {
// Read field name.
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
default:
return d.unexpectedTokenError(tok)
case json.ObjectClose:
break Loop
case json.Name:
// Continue.
}
// Unmarshal field name.
pkey, err := d.unmarshalMapKey(tok, fd.MapKey())
if err != nil {
return err
}
// Check for duplicate field name.
if mmap.Has(pkey) {
return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString())
}
// Read and unmarshal field value.
pval, err := unmarshalMapValue()
if err != nil {
return err
}
mmap.Set(pkey, pval)
}
return nil
}
// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey.
// A map key type is any integral or string type.
func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) {
const b32 = 32
const b64 = 64
const base10 = 10
name := tok.Name()
kind := fd.Kind()
switch kind {
case protoreflect.StringKind:
return protoreflect.ValueOfString(name).MapKey(), nil
case protoreflect.BoolKind:
switch name {
case "true":
return protoreflect.ValueOfBool(true).MapKey(), nil
case "false":
return protoreflect.ValueOfBool(false).MapKey(), nil
}
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if n, err := strconv.ParseInt(name, base10, b32); err == nil {
return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil
}
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if n, err := strconv.ParseInt(name, base10, b64); err == nil {
return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil
}
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if n, err := strconv.ParseUint(name, base10, b32); err == nil {
return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil
}
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if n, err := strconv.ParseUint(name, base10, b64); err == nil {
return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil
}
default:
panic(fmt.Sprintf("invalid kind for map key: %v", kind))
}
return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString())
}

View File

@@ -0,0 +1,11 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package protojson marshals and unmarshals protocol buffer messages as JSON
// format. It follows the guide at
// https://protobuf.dev/programming-guides/proto3#json.
//
// This package produces a different output than the standard "encoding/json"
// package, which does not operate correctly on protocol buffer messages.
package protojson
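
As a brief illustration of that difference (a sketch assuming the durationpb helper package; the encoding/json output shown is indicative only), protojson applies the proto3 JSON mapping while encoding/json merely reflects over the generated struct fields:

package main

import (
    "encoding/json"
    "fmt"
    "time"

    "google.golang.org/protobuf/encoding/protojson"
    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    d := durationpb.New(90 * time.Second)

    pj, _ := protojson.Marshal(d)
    fmt.Println(string(pj)) // "90s"

    ej, _ := json.Marshal(d)
    fmt.Println(string(ej)) // something like {"seconds":90}, not the proto3 JSON form
}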

View File

@@ -0,0 +1,349 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package protojson
import (
"encoding/base64"
"fmt"
"google.golang.org/protobuf/internal/encoding/json"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/order"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const defaultIndent = " "
// Format formats the message as a multiline string.
// This function is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}
// Marshal writes the given proto.Message in JSON format using default options.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func Marshal(m proto.Message) ([]byte, error) {
return MarshalOptions{}.Marshal(m)
}
// MarshalOptions is a configurable JSON format marshaler.
type MarshalOptions struct {
pragma.NoUnkeyedLiterals
// Multiline specifies whether the marshaler should format the output in
// indented-form with every textual element on a new line.
// If Indent is an empty string, then an arbitrary indent is chosen.
Multiline bool
// Indent specifies the set of indentation characters to use in a multiline
// formatted output such that every entry is preceded by Indent and
// terminated by a newline. If non-empty, then Multiline is treated as true.
// Indent can only be composed of space or tab characters.
Indent string
// AllowPartial allows messages that have missing required fields to marshal
// without returning an error. If AllowPartial is false (the default),
// Marshal will return error if there are any missing required fields.
AllowPartial bool
// UseProtoNames uses proto field name instead of lowerCamelCase name in JSON
// field names.
UseProtoNames bool
// UseEnumNumbers emits enum values as numbers.
UseEnumNumbers bool
// EmitUnpopulated specifies whether to emit unpopulated fields. It does not
// emit unpopulated oneof fields or unpopulated extension fields.
// The JSON values emitted for unpopulated fields are as follows:
// ╔═══════╤════════════════════════════╗
// ║ JSON │ Protobuf field ║
// ╠═══════╪════════════════════════════╣
// ║ false │ proto3 boolean fields ║
// ║ 0 │ proto3 numeric fields ║
// ║ "" │ proto3 string/bytes fields ║
// ║ null │ proto2 scalar fields ║
// ║ null │ message fields ║
// ║ [] │ list fields ║
// ║ {} │ map fields ║
// ╚═══════╧════════════════════════════╝
EmitUnpopulated bool
// Resolver is used for looking up types when expanding google.protobuf.Any
// messages. If nil, this defaults to using protoregistry.GlobalTypes.
Resolver interface {
protoregistry.ExtensionTypeResolver
protoregistry.MessageTypeResolver
}
}
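
A small usage sketch of these options (assuming the structpb helper package; the exact output and field ordering are not guaranteed to be stable):

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protojson"
    "google.golang.org/protobuf/types/known/structpb"
)

func main() {
    s, err := structpb.NewStruct(map[string]interface{}{
        "name":   "spot",
        "weight": 12.5,
    })
    if err != nil {
        panic(err)
    }

    // Compact, single-line output.
    b, _ := protojson.Marshal(s)
    fmt.Println(string(b)) // e.g. {"name":"spot","weight":12.5}

    // Indented, multiline output.
    b, _ = protojson.MarshalOptions{Multiline: true, Indent: "  "}.Marshal(s)
    fmt.Println(string(b))
}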
// Format formats the message as a string.
// This method is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func (o MarshalOptions) Format(m proto.Message) string {
if m == nil || !m.ProtoReflect().IsValid() {
return "<nil>" // invalid syntax, but okay since this is for debugging
}
o.AllowPartial = true
b, _ := o.Marshal(m)
return string(b)
}
// Marshal marshals the given proto.Message in the JSON format using options in
// MarshalOptions. Do not depend on the output being stable. It may change over
// time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(nil, m)
}
// MarshalAppend appends the JSON format encoding of m to b,
// returning the result.
func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) {
return o.marshal(b, m)
}
// marshal is a centralized function that all marshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for marshal that do not go through this.
func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) {
if o.Multiline && o.Indent == "" {
o.Indent = defaultIndent
}
if o.Resolver == nil {
o.Resolver = protoregistry.GlobalTypes
}
internalEnc, err := json.NewEncoder(b, o.Indent)
if err != nil {
return nil, err
}
// Treat nil message interface as an empty message,
// in which case the output is an empty JSON object.
if m == nil {
return append(b, '{', '}'), nil
}
enc := encoder{internalEnc, o}
if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil {
return nil, err
}
if o.AllowPartial {
return enc.Bytes(), nil
}
return enc.Bytes(), proto.CheckInitialized(m)
}
type encoder struct {
*json.Encoder
opts MarshalOptions
}
// typeFieldDesc is a synthetic field descriptor used for the "@type" field.
var typeFieldDesc = func() protoreflect.FieldDescriptor {
var fd filedesc.Field
fd.L0.FullName = "@type"
fd.L0.Index = -1
fd.L1.Cardinality = protoreflect.Optional
fd.L1.Kind = protoreflect.StringKind
return &fd
}()
// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method
// to additionally iterate over a synthetic field for the type URL.
type typeURLFieldRanger struct {
order.FieldRanger
typeURL string
}
func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) {
return
}
m.FieldRanger.Range(f)
}
// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
// method to additionally iterate over unpopulated fields.
type unpopulatedFieldRanger struct{ protoreflect.Message }
func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
fds := m.Descriptor().Fields()
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
if m.Has(fd) || fd.ContainingOneof() != nil {
continue // ignore populated fields and fields within a oneof
}
v := m.Get(fd)
isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
if isProto2Scalar || isSingularMessage {
v = protoreflect.Value{} // use invalid value to emit null
}
if !f(fd, v) {
return
}
}
m.Message.Range(f)
}
// marshalMessage marshals the fields in the given protoreflect.Message.
// If the typeURL is non-empty, then a synthetic "@type" field is injected
// containing the URL as the value.
func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) {
return errors.New("no support for proto1 MessageSets")
}
if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil {
return marshal(e, m)
}
e.StartObject()
defer e.EndObject()
var fields order.FieldRanger = m
if e.opts.EmitUnpopulated {
fields = unpopulatedFieldRanger{m}
}
if typeURL != "" {
fields = typeURLFieldRanger{fields, typeURL}
}
var err error
order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
name := fd.JSONName()
if e.opts.UseProtoNames {
name = fd.TextName()
}
if err = e.WriteName(name); err != nil {
return false
}
if err = e.marshalValue(v, fd); err != nil {
return false
}
return true
})
return err
}
// marshalValue marshals the given protoreflect.Value.
func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
switch {
case fd.IsList():
return e.marshalList(val.List(), fd)
case fd.IsMap():
return e.marshalMap(val.Map(), fd)
default:
return e.marshalSingular(val, fd)
}
}
// marshalSingular marshals the given non-repeated field value. This includes
// all scalar types, enums, messages, and groups.
func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
if !val.IsValid() {
e.WriteNull()
return nil
}
switch kind := fd.Kind(); kind {
case protoreflect.BoolKind:
e.WriteBool(val.Bool())
case protoreflect.StringKind:
if e.WriteString(val.String()) != nil {
return errors.InvalidUTF8(string(fd.FullName()))
}
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
e.WriteInt(val.Int())
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
e.WriteUint(val.Uint())
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind,
protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind:
// 64-bit integers are written out as JSON string.
e.WriteString(val.String())
case protoreflect.FloatKind:
// Encoder.WriteFloat handles the special numbers NaN and infinities.
e.WriteFloat(val.Float(), 32)
case protoreflect.DoubleKind:
// Encoder.WriteFloat handles the special numbers NaN and infinities.
e.WriteFloat(val.Float(), 64)
case protoreflect.BytesKind:
e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes()))
case protoreflect.EnumKind:
if fd.Enum().FullName() == genid.NullValue_enum_fullname {
e.WriteNull()
} else {
desc := fd.Enum().Values().ByNumber(val.Enum())
if e.opts.UseEnumNumbers || desc == nil {
e.WriteInt(int64(val.Enum()))
} else {
e.WriteString(string(desc.Name()))
}
}
case protoreflect.MessageKind, protoreflect.GroupKind:
if err := e.marshalMessage(val.Message(), ""); err != nil {
return err
}
default:
panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
}
return nil
}
// marshalList marshals the given protoreflect.List.
func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
e.StartArray()
defer e.EndArray()
for i := 0; i < list.Len(); i++ {
item := list.Get(i)
if err := e.marshalSingular(item, fd); err != nil {
return err
}
}
return nil
}
// marshalMap marshals the given protoreflect.Map.
func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
e.StartObject()
defer e.EndObject()
var err error
order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool {
if err = e.WriteName(k.String()); err != nil {
return false
}
if err = e.marshalSingular(v, fd.MapValue()); err != nil {
return false
}
return true
})
return err
}

View File

@@ -0,0 +1,895 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package protojson
import (
"bytes"
"fmt"
"math"
"strconv"
"strings"
"time"
"google.golang.org/protobuf/internal/encoding/json"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
)
type marshalFunc func(encoder, protoreflect.Message) error
// wellKnownTypeMarshaler returns a marshal function if the message type
// has specialized serialization behavior. It returns nil otherwise.
func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc {
if name.Parent() == genid.GoogleProtobuf_package {
switch name.Name() {
case genid.Any_message_name:
return encoder.marshalAny
case genid.Timestamp_message_name:
return encoder.marshalTimestamp
case genid.Duration_message_name:
return encoder.marshalDuration
case genid.BoolValue_message_name,
genid.Int32Value_message_name,
genid.Int64Value_message_name,
genid.UInt32Value_message_name,
genid.UInt64Value_message_name,
genid.FloatValue_message_name,
genid.DoubleValue_message_name,
genid.StringValue_message_name,
genid.BytesValue_message_name:
return encoder.marshalWrapperType
case genid.Struct_message_name:
return encoder.marshalStruct
case genid.ListValue_message_name:
return encoder.marshalListValue
case genid.Value_message_name:
return encoder.marshalKnownValue
case genid.FieldMask_message_name:
return encoder.marshalFieldMask
case genid.Empty_message_name:
return encoder.marshalEmpty
}
}
return nil
}
type unmarshalFunc func(decoder, protoreflect.Message) error
// wellKnownTypeUnmarshaler returns an unmarshal function if the message type
// has specialized serialization behavior. It returns nil otherwise.
func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc {
if name.Parent() == genid.GoogleProtobuf_package {
switch name.Name() {
case genid.Any_message_name:
return decoder.unmarshalAny
case genid.Timestamp_message_name:
return decoder.unmarshalTimestamp
case genid.Duration_message_name:
return decoder.unmarshalDuration
case genid.BoolValue_message_name,
genid.Int32Value_message_name,
genid.Int64Value_message_name,
genid.UInt32Value_message_name,
genid.UInt64Value_message_name,
genid.FloatValue_message_name,
genid.DoubleValue_message_name,
genid.StringValue_message_name,
genid.BytesValue_message_name:
return decoder.unmarshalWrapperType
case genid.Struct_message_name:
return decoder.unmarshalStruct
case genid.ListValue_message_name:
return decoder.unmarshalListValue
case genid.Value_message_name:
return decoder.unmarshalKnownValue
case genid.FieldMask_message_name:
return decoder.unmarshalFieldMask
case genid.Empty_message_name:
return decoder.unmarshalEmpty
}
}
return nil
}
// The JSON representation of an Any message uses the regular representation of
// the deserialized, embedded message, with an additional field `@type` which
// contains the type URL. If the embedded message type is well-known and has a
// custom JSON representation, that representation will be embedded by adding a
// field `value` which holds the custom JSON in addition to the `@type` field.
func (e encoder) marshalAny(m protoreflect.Message) error {
fds := m.Descriptor().Fields()
fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
fdValue := fds.ByNumber(genid.Any_Value_field_number)
if !m.Has(fdType) {
if !m.Has(fdValue) {
// If message is empty, marshal out empty JSON object.
e.StartObject()
e.EndObject()
return nil
} else {
// Return error if type_url field is not set, but value is set.
return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name)
}
}
typeVal := m.Get(fdType)
valueVal := m.Get(fdValue)
// Resolve the type in order to unmarshal value field.
typeURL := typeVal.String()
emt, err := e.opts.Resolver.FindMessageByURL(typeURL)
if err != nil {
return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err)
}
em := emt.New()
err = proto.UnmarshalOptions{
AllowPartial: true, // never check required fields inside an Any
Resolver: e.opts.Resolver,
}.Unmarshal(valueVal.Bytes(), em.Interface())
if err != nil {
return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err)
}
// If type of value has custom JSON encoding, marshal out a field "value"
// with corresponding custom JSON encoding of the embedded message as a
// field.
if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil {
e.StartObject()
defer e.EndObject()
// Marshal out @type field.
e.WriteName("@type")
if err := e.WriteString(typeURL); err != nil {
return err
}
e.WriteName("value")
return marshal(e, em)
}
// Else, marshal out the embedded message's fields in this Any object.
if err := e.marshalMessage(em, typeURL); err != nil {
return err
}
return nil
}
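
For example (a sketch assuming the anypb and durationpb helper packages), wrapping a well-known type in an Any produces the nested "value" form described above:

package main

import (
    "fmt"
    "time"

    "google.golang.org/protobuf/encoding/protojson"
    "google.golang.org/protobuf/types/known/anypb"
    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    a, err := anypb.New(durationpb.New(90 * time.Second))
    if err != nil {
        panic(err)
    }

    b, err := protojson.Marshal(a)
    if err != nil {
        panic(err)
    }
    // Duration has a custom JSON form, so it is nested under "value", e.g.
    // {"@type":"type.googleapis.com/google.protobuf.Duration","value":"90s"}
    fmt.Println(string(b))
}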
func (d decoder) unmarshalAny(m protoreflect.Message) error {
// Peek to check for json.ObjectOpen to avoid advancing a read.
start, err := d.Peek()
if err != nil {
return err
}
if start.Kind() != json.ObjectOpen {
return d.unexpectedTokenError(start)
}
// Use another decoder to parse the unread bytes for @type field. This
// avoids advancing a read from current decoder because the current JSON
// object may contain the fields of the embedded type.
dec := decoder{d.Clone(), UnmarshalOptions{}}
tok, err := findTypeURL(dec)
switch err {
case errEmptyObject:
// An empty JSON object translates to an empty Any message.
d.Read() // Read json.ObjectOpen.
d.Read() // Read json.ObjectClose.
return nil
case errMissingType:
if d.opts.DiscardUnknown {
// Treat all fields as unknowns, similar to an empty object.
return d.skipJSONValue()
}
// Use start.Pos() for line position.
return d.newError(start.Pos(), err.Error())
default:
if err != nil {
return err
}
}
typeURL := tok.ParsedString()
emt, err := d.opts.Resolver.FindMessageByURL(typeURL)
if err != nil {
return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err)
}
// Create new message for the embedded message type and unmarshal into it.
em := emt.New()
if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil {
// If embedded message is a custom type,
// unmarshal the JSON "value" field into it.
if err := d.unmarshalAnyValue(unmarshal, em); err != nil {
return err
}
} else {
// Else unmarshal the current JSON object into it.
if err := d.unmarshalMessage(em, true); err != nil {
return err
}
}
// Serialize the embedded message and assign the resulting bytes to the
// proto value field.
b, err := proto.MarshalOptions{
AllowPartial: true, // No need to check required fields inside an Any.
Deterministic: true,
}.Marshal(em.Interface())
if err != nil {
return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err)
}
fds := m.Descriptor().Fields()
fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
fdValue := fds.ByNumber(genid.Any_Value_field_number)
m.Set(fdType, protoreflect.ValueOfString(typeURL))
m.Set(fdValue, protoreflect.ValueOfBytes(b))
return nil
}
var errEmptyObject = fmt.Errorf(`empty object`)
var errMissingType = fmt.Errorf(`missing "@type" field`)
// findTypeURL returns the token for the "@type" field value from the given
// JSON bytes. It is expected that the given bytes start with json.ObjectOpen.
// It returns errEmptyObject if the JSON object is empty or errMissingType if
// the @type field does not exist. It returns another error if the @type field
// is not valid or there are other decoding issues.
func findTypeURL(d decoder) (json.Token, error) {
var typeURL string
var typeTok json.Token
numFields := 0
// Skip start object.
d.Read()
Loop:
for {
tok, err := d.Read()
if err != nil {
return json.Token{}, err
}
switch tok.Kind() {
case json.ObjectClose:
if typeURL == "" {
// Did not find @type field.
if numFields > 0 {
return json.Token{}, errMissingType
}
return json.Token{}, errEmptyObject
}
break Loop
case json.Name:
numFields++
if tok.Name() != "@type" {
// Skip value.
if err := d.skipJSONValue(); err != nil {
return json.Token{}, err
}
continue
}
// Return error if this was previously set already.
if typeURL != "" {
return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`)
}
// Read field value.
tok, err := d.Read()
if err != nil {
return json.Token{}, err
}
if tok.Kind() != json.String {
return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString())
}
typeURL = tok.ParsedString()
if typeURL == "" {
return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`)
}
typeTok = tok
}
}
return typeTok, nil
}
// skipJSONValue parses a JSON value (null, boolean, string, number, object and
// array) in order to advance the read to the next JSON value. It relies on
// the decoder returning an error if the types are not in valid sequence.
func (d decoder) skipJSONValue() error {
tok, err := d.Read()
if err != nil {
return err
}
// Only need to continue reading for objects and arrays.
switch tok.Kind() {
case json.ObjectOpen:
for {
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case json.ObjectClose:
return nil
case json.Name:
// Skip object field value.
if err := d.skipJSONValue(); err != nil {
return err
}
}
}
case json.ArrayOpen:
for {
tok, err := d.Peek()
if err != nil {
return err
}
switch tok.Kind() {
case json.ArrayClose:
d.Read()
return nil
default:
// Skip array item.
if err := d.skipJSONValue(); err != nil {
return err
}
}
}
}
return nil
}
// unmarshalAnyValue unmarshals the given custom-type message from the JSON
// object's "value" field.
func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error {
// Skip ObjectOpen, and start reading the fields.
d.Read()
var found bool // Used for detecting duplicate "value".
for {
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case json.ObjectClose:
if !found {
return d.newError(tok.Pos(), `missing "value" field`)
}
return nil
case json.Name:
switch tok.Name() {
case "@type":
// Skip the value as this was previously parsed already.
d.Read()
case "value":
if found {
return d.newError(tok.Pos(), `duplicate "value" field`)
}
// Unmarshal the field value into the given message.
if err := unmarshal(d, m); err != nil {
return err
}
found = true
default:
if d.opts.DiscardUnknown {
if err := d.skipJSONValue(); err != nil {
return err
}
continue
}
return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
}
}
}
}
// Wrapper types are encoded as JSON primitives like string, number or boolean.
func (e encoder) marshalWrapperType(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
val := m.Get(fd)
return e.marshalSingular(val, fd)
}
func (d decoder) unmarshalWrapperType(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
val, err := d.unmarshalScalar(fd)
if err != nil {
return err
}
m.Set(fd, val)
return nil
}
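
A short sketch (assuming the wrapperspb helper package) showing how wrappers collapse to JSON primitives, with 64-bit integers emitted as strings per the rule in marshalSingular:

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protojson"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    b32, _ := protojson.Marshal(wrapperspb.Int32(5))
    b64, _ := protojson.Marshal(wrapperspb.Int64(5))
    fmt.Println(string(b32)) // 5   (32-bit value: JSON number)
    fmt.Println(string(b64)) // "5" (64-bit value: JSON string)
}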
// The JSON representation for Empty is an empty JSON object.
func (e encoder) marshalEmpty(protoreflect.Message) error {
e.StartObject()
e.EndObject()
return nil
}
func (d decoder) unmarshalEmpty(protoreflect.Message) error {
tok, err := d.Read()
if err != nil {
return err
}
if tok.Kind() != json.ObjectOpen {
return d.unexpectedTokenError(tok)
}
for {
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case json.ObjectClose:
return nil
case json.Name:
if d.opts.DiscardUnknown {
if err := d.skipJSONValue(); err != nil {
return err
}
continue
}
return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
default:
return d.unexpectedTokenError(tok)
}
}
}
// The JSON representation for Struct is a JSON object that contains the encoded
// Struct.fields map and follows the serialization rules for a map.
func (e encoder) marshalStruct(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
return e.marshalMap(m.Get(fd).Map(), fd)
}
func (d decoder) unmarshalStruct(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
return d.unmarshalMap(m.Mutable(fd).Map(), fd)
}
// The JSON representation for ListValue is a JSON array that contains the encoded
// ListValue.values repeated field and follows the serialization rules for a
// repeated field.
func (e encoder) marshalListValue(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
return e.marshalList(m.Get(fd).List(), fd)
}
func (d decoder) unmarshalListValue(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
return d.unmarshalList(m.Mutable(fd).List(), fd)
}
// The JSON representation for a Value is dependent on the oneof field that is
// set. Each field in the oneof has its own custom serialization rule. A Value
// message needs to have a oneof field set, else it is an error.
func (e encoder) marshalKnownValue(m protoreflect.Message) error {
od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name)
fd := m.WhichOneof(od)
if fd == nil {
return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname)
}
if fd.Number() == genid.Value_NumberValue_field_number {
if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) {
return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v)
}
}
return e.marshalSingular(m.Get(fd), fd)
}
func (d decoder) unmarshalKnownValue(m protoreflect.Message) error {
tok, err := d.Peek()
if err != nil {
return err
}
var fd protoreflect.FieldDescriptor
var val protoreflect.Value
switch tok.Kind() {
case json.Null:
d.Read()
fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number)
val = protoreflect.ValueOfEnum(0)
case json.Bool:
tok, err := d.Read()
if err != nil {
return err
}
fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number)
val = protoreflect.ValueOfBool(tok.Bool())
case json.Number:
tok, err := d.Read()
if err != nil {
return err
}
fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number)
var ok bool
val, ok = unmarshalFloat(tok, 64)
if !ok {
return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
}
case json.String:
// A JSON string may have been encoded from the number_value field,
// e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows
// for it to be in JSON string form. Given this custom encoding spec,
// however, there is no way to identify that and hence a JSON string is
// always assigned to the string_value field, which means that certain
// encodings cannot be parsed back to the same field.
tok, err := d.Read()
if err != nil {
return err
}
fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number)
val = protoreflect.ValueOfString(tok.ParsedString())
case json.ObjectOpen:
fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number)
val = m.NewField(fd)
if err := d.unmarshalStruct(val.Message()); err != nil {
return err
}
case json.ArrayOpen:
fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number)
val = m.NewField(fd)
if err := d.unmarshalListValue(val.Message()); err != nil {
return err
}
default:
return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
}
m.Set(fd, val)
return nil
}
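
The string_value caveat noted above can be observed directly; a sketch assuming the structpb helper package:

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protojson"
    "google.golang.org/protobuf/types/known/structpb"
)

func main() {
    var v structpb.Value
    if err := protojson.Unmarshal([]byte(`"NaN"`), &v); err != nil {
        panic(err)
    }
    // The JSON string is assigned to string_value, not number_value,
    // even though "NaN" is also accepted when parsing a double field.
    fmt.Printf("%T\n", v.GetKind()) // *structpb.Value_StringValue
}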
// The JSON representation for a Duration is a JSON string that ends in the
// suffix "s" (indicating seconds) and is preceded by the number of seconds,
// with nanoseconds expressed as fractional seconds.
//
// Durations less than one second are represented with a 0 seconds field and a
// positive or negative nanos field. For durations of one second or more, a
// non-zero value for the nanos field must be of the same sign as the seconds
// field.
//
// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive.
// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive.
const (
secondsInNanos = 999999999
maxSecondsInDuration = 315576000000
)
func (e encoder) marshalDuration(m protoreflect.Message) error {
fds := m.Descriptor().Fields()
fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
secsVal := m.Get(fdSeconds)
nanosVal := m.Get(fdNanos)
secs := secsVal.Int()
nanos := nanosVal.Int()
if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs)
}
if nanos < -secondsInNanos || nanos > secondsInNanos {
return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos)
}
if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) {
return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname)
}
// Generated output always contains 0, 3, 6, or 9 fractional digits,
// depending on required precision, followed by the suffix "s".
var sign string
if secs < 0 || nanos < 0 {
sign, secs, nanos = "-", -1*secs, -1*nanos
}
x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos)
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, ".000")
e.WriteString(x + "s")
return nil
}
func (d decoder) unmarshalDuration(m protoreflect.Message) error {
tok, err := d.Read()
if err != nil {
return err
}
if tok.Kind() != json.String {
return d.unexpectedTokenError(tok)
}
secs, nanos, ok := parseDuration(tok.ParsedString())
if !ok {
return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString())
}
// Validate seconds. No need to validate nanos because parseDuration would
// have covered that already.
if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString())
}
fds := m.Descriptor().Fields()
fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
m.Set(fdNanos, protoreflect.ValueOfInt32(nanos))
return nil
}
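
A small round-trip sketch of this format (assuming the durationpb helper package):

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protojson"
    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    var d durationpb.Duration
    if err := protojson.Unmarshal([]byte(`"1.5s"`), &d); err != nil {
        panic(err)
    }
    fmt.Println(d.Seconds, d.Nanos) // 1 500000000

    b, _ := protojson.Marshal(&d)
    fmt.Println(string(b)) // "1.500s"
}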
// parseDuration parses the given input string for seconds and nanoseconds values
// for the Duration JSON format. The format is a decimal number with a suffix
// 's'. It can have optional plus/minus sign. There needs to be at least an
// integer or fractional part. Fractional part is limited to 9 digits only for
// nanoseconds precision, regardless of whether there are trailing zero digits.
// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s.
func parseDuration(input string) (int64, int32, bool) {
b := []byte(input)
size := len(b)
if size < 2 {
return 0, 0, false
}
if b[size-1] != 's' {
return 0, 0, false
}
b = b[:size-1]
// Read optional plus/minus symbol.
var neg bool
switch b[0] {
case '-':
neg = true
b = b[1:]
case '+':
b = b[1:]
}
if len(b) == 0 {
return 0, 0, false
}
// Read the integer part.
var intp []byte
switch {
case b[0] == '0':
b = b[1:]
case '1' <= b[0] && b[0] <= '9':
intp = b[0:]
b = b[1:]
n := 1
for len(b) > 0 && '0' <= b[0] && b[0] <= '9' {
n++
b = b[1:]
}
intp = intp[:n]
case b[0] == '.':
// Continue below.
default:
return 0, 0, false
}
hasFrac := false
var frac [9]byte
if len(b) > 0 {
if b[0] != '.' {
return 0, 0, false
}
// Read the fractional part.
b = b[1:]
n := 0
for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' {
frac[n] = b[0]
n++
b = b[1:]
}
// It is not valid if there are more bytes left.
if len(b) > 0 {
return 0, 0, false
}
// Pad fractional part with 0s.
for i := n; i < 9; i++ {
frac[i] = '0'
}
hasFrac = true
}
var secs int64
if len(intp) > 0 {
var err error
secs, err = strconv.ParseInt(string(intp), 10, 64)
if err != nil {
return 0, 0, false
}
}
var nanos int64
if hasFrac {
nanob := bytes.TrimLeft(frac[:], "0")
if len(nanob) > 0 {
var err error
nanos, err = strconv.ParseInt(string(nanob), 10, 32)
if err != nil {
return 0, 0, false
}
}
}
if neg {
if secs > 0 {
secs = -secs
}
if nanos > 0 {
nanos = -nanos
}
}
return secs, int32(nanos), true
}
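// Illustrative behavior of parseDuration, derived from the logic above (not
// part of the vendored source):
//
//	parseDuration("1s")             // 1, 0, true
//	parseDuration("1.5s")           // 1, 500000000, true
//	parseDuration("-.25s")          // 0, -250000000, true
//	parseDuration("1.0000000001s")  // 0, 0, false (more than 9 fractional digits)
//	parseDuration("1")              // 0, 0, false (missing the "s" suffix)
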
// The JSON representation for a Timestamp is a JSON string in the RFC 3339
// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where
// {year} is always expressed using four digits while {month}, {day}, {hour},
// {min}, and {sec} are zero-padded to two digits each. The fractional seconds,
// which can go up to 9 digits (up to 1 nanosecond resolution), are optional. The
// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding
// should always use UTC (as indicated by "Z") and a decoder should be able to
// accept both UTC and other timezones (as indicated by an offset).
//
// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z
// inclusive.
// Timestamp.nanos must be from 0 to 999,999,999 inclusive.
const (
maxTimestampSeconds = 253402300799
minTimestampSeconds = -62135596800
)
func (e encoder) marshalTimestamp(m protoreflect.Message) error {
fds := m.Descriptor().Fields()
fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
secsVal := m.Get(fdSeconds)
nanosVal := m.Get(fdNanos)
secs := secsVal.Int()
nanos := nanosVal.Int()
if secs < minTimestampSeconds || secs > maxTimestampSeconds {
return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs)
}
if nanos < 0 || nanos > secondsInNanos {
return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos)
}
// Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3,
// 6 or 9 fractional digits.
t := time.Unix(secs, nanos).UTC()
x := t.Format("2006-01-02T15:04:05.000000000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, ".000")
e.WriteString(x + "Z")
return nil
}
func (d decoder) unmarshalTimestamp(m protoreflect.Message) error {
tok, err := d.Read()
if err != nil {
return err
}
if tok.Kind() != json.String {
return d.unexpectedTokenError(tok)
}
s := tok.ParsedString()
t, err := time.Parse(time.RFC3339Nano, s)
if err != nil {
return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
}
// Validate seconds.
secs := t.Unix()
if secs < minTimestampSeconds || secs > maxTimestampSeconds {
return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString())
}
// Validate subseconds.
i := strings.LastIndexByte(s, '.') // start of subsecond field
j := strings.LastIndexAny(s, "Z-+") // start of timezone field
if i >= 0 && j >= i && j-i > len(".999999999") {
return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
}
fds := m.Descriptor().Fields()
fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond())))
return nil
}
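// Illustrative sketch (not part of the vendored source): Timestamp handling as
// seen through the public protojson API. The sample instant is an assumption
// chosen only for this example.
//
//	import (
//		"fmt"
//		"time"
//
//		"google.golang.org/protobuf/encoding/protojson"
//		"google.golang.org/protobuf/types/known/timestamppb"
//	)
//
//	func exampleTimestamp() {
//		ts := timestamppb.New(time.Date(2023, time.October, 9, 7, 11, 27, 500000000, time.UTC))
//		b, _ := protojson.Marshal(ts)
//		fmt.Println(string(b)) // "2023-10-09T07:11:27.500Z"
//
//		// Decoding accepts non-UTC offsets and normalizes to seconds/nanos.
//		var ts2 timestamppb.Timestamp
//		_ = protojson.Unmarshal([]byte(`"2023-10-09T09:11:27.500+02:00"`), &ts2)
//		fmt.Println(ts2.AsTime().Equal(ts.AsTime())) // true
//	}
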
// The JSON representation for a FieldMask is a JSON string where paths are
// separated by a comma. Field names in each path are converted to/from
// lower-camel naming conventions. Encoding should fail if the path name would
// end up differently after a round-trip.
func (e encoder) marshalFieldMask(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
list := m.Get(fd).List()
paths := make([]string, 0, list.Len())
for i := 0; i < list.Len(); i++ {
s := list.Get(i).String()
if !protoreflect.FullName(s).IsValid() {
return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s)
}
// Return error if conversion to camelCase is not reversible.
cc := strs.JSONCamelCase(s)
if s != strs.JSONSnakeCase(cc) {
return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s)
}
paths = append(paths, cc)
}
e.WriteString(strings.Join(paths, ","))
return nil
}
func (d decoder) unmarshalFieldMask(m protoreflect.Message) error {
tok, err := d.Read()
if err != nil {
return err
}
if tok.Kind() != json.String {
return d.unexpectedTokenError(tok)
}
str := strings.TrimSpace(tok.ParsedString())
if str == "" {
return nil
}
paths := strings.Split(str, ",")
fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
list := m.Mutable(fd).List()
for _, s0 := range paths {
s := strs.JSONSnakeCase(s0)
if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() {
return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0)
}
list.Append(protoreflect.ValueOfString(s))
}
return nil
}
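// Illustrative sketch (not part of the vendored source): FieldMask paths as
// seen through the public protojson API. The sample paths are assumptions
// chosen only for this example.
//
//	import (
//		"fmt"
//
//		"google.golang.org/protobuf/encoding/protojson"
//		"google.golang.org/protobuf/types/known/fieldmaskpb"
//	)
//
//	func exampleFieldMask() {
//		fm := &fieldmaskpb.FieldMask{Paths: []string{"user.display_name", "photo"}}
//		b, _ := protojson.Marshal(fm)
//		fmt.Println(string(b)) // "user.displayName,photo"
//
//		// Decoding converts lowerCamelCase back to snake_case; a JSON path that
//		// already contains '_' is rejected by unmarshalFieldMask above.
//		var fm2 fieldmaskpb.FieldMask
//		_ = protojson.Unmarshal([]byte(`"user.displayName,photo"`), &fm2)
//		fmt.Println(fm2.Paths) // [user.display_name photo]
//	}
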

View File

@@ -0,0 +1,340 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"fmt"
"io"
"regexp"
"unicode/utf8"
"google.golang.org/protobuf/internal/errors"
)
// call specifies which Decoder method was invoked.
type call uint8
const (
readCall call = iota
peekCall
)
const unexpectedFmt = "unexpected token %s"
// ErrUnexpectedEOF means that EOF was encountered in the middle of the input.
var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF)
// Decoder is a token-based JSON decoder.
type Decoder struct {
// lastCall is last method called, either readCall or peekCall.
// Initial value is readCall.
lastCall call
// lastToken contains the last read token.
lastToken Token
// lastErr contains the last read error.
lastErr error
// openStack is a stack containing ObjectOpen and ArrayOpen values. The
// top of stack represents the object or the array the current value is
// directly located in.
openStack []Kind
// orig is used in reporting line and column.
orig []byte
// in contains the unconsumed input.
in []byte
}
// NewDecoder returns a Decoder to read the given []byte.
func NewDecoder(b []byte) *Decoder {
return &Decoder{orig: b, in: b}
}
// Peek looks ahead and returns the next token kind without advancing a read.
func (d *Decoder) Peek() (Token, error) {
defer func() { d.lastCall = peekCall }()
if d.lastCall == readCall {
d.lastToken, d.lastErr = d.Read()
}
return d.lastToken, d.lastErr
}
// Read returns the next JSON token.
// It will return an error if there is no valid token.
func (d *Decoder) Read() (Token, error) {
const scalar = Null | Bool | Number | String
defer func() { d.lastCall = readCall }()
if d.lastCall == peekCall {
return d.lastToken, d.lastErr
}
tok, err := d.parseNext()
if err != nil {
return Token{}, err
}
switch tok.kind {
case EOF:
if len(d.openStack) != 0 ||
d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 {
return Token{}, ErrUnexpectedEOF
}
case Null:
if !d.isValueNext() {
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
}
case Bool, Number:
if !d.isValueNext() {
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
}
case String:
if d.isValueNext() {
break
}
// This string token should only be for a field name.
if d.lastToken.kind&(ObjectOpen|comma) == 0 {
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
}
if len(d.in) == 0 {
return Token{}, ErrUnexpectedEOF
}
if c := d.in[0]; c != ':' {
return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c))
}
tok.kind = Name
d.consume(1)
case ObjectOpen, ArrayOpen:
if !d.isValueNext() {
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
}
d.openStack = append(d.openStack, tok.kind)
case ObjectClose:
if len(d.openStack) == 0 ||
d.lastToken.kind == comma ||
d.openStack[len(d.openStack)-1] != ObjectOpen {
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
}
d.openStack = d.openStack[:len(d.openStack)-1]
case ArrayClose:
if len(d.openStack) == 0 ||
d.lastToken.kind == comma ||
d.openStack[len(d.openStack)-1] != ArrayOpen {
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
}
d.openStack = d.openStack[:len(d.openStack)-1]
case comma:
if len(d.openStack) == 0 ||
d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 {
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
}
}
// Update d.lastToken only after validating token to be in the right sequence.
d.lastToken = tok
if d.lastToken.kind == comma {
return d.Read()
}
return tok, nil
}
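// Illustrative sketch (not part of the vendored source): the token stream Read
// produces for a small document, assuming in-package use (this json package is
// internal and cannot be imported from outside the protobuf module).
//
//	d := NewDecoder([]byte(`{"a": [1, true]}`))
//	// Successive Read calls yield, in order:
//	//   ObjectOpen, Name "a", ArrayOpen, Number 1, Bool true,
//	//   ArrayClose, ObjectClose, EOF.
//	// Commas are validated and consumed internally (see the comma case above)
//	// and never returned to the caller; the ':' after a field name is consumed
//	// when the String token is re-kinded as Name.
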
// Any sequence that looks like a non-delimiter (for error reporting).
var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`)
// parseNext parses for the next JSON token. It returns a Token object for
// different types, except for Name. It does not handle whether the next token
// is in a valid sequence or not.
func (d *Decoder) parseNext() (Token, error) {
// Trim leading spaces.
d.consume(0)
in := d.in
if len(in) == 0 {
return d.consumeToken(EOF, 0), nil
}
switch in[0] {
case 'n':
if n := matchWithDelim("null", in); n != 0 {
return d.consumeToken(Null, n), nil
}
case 't':
if n := matchWithDelim("true", in); n != 0 {
return d.consumeBoolToken(true, n), nil
}
case 'f':
if n := matchWithDelim("false", in); n != 0 {
return d.consumeBoolToken(false, n), nil
}
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
if n, ok := parseNumber(in); ok {
return d.consumeToken(Number, n), nil
}
case '"':
s, n, err := d.parseString(in)
if err != nil {
return Token{}, err
}
return d.consumeStringToken(s, n), nil
case '{':
return d.consumeToken(ObjectOpen, 1), nil
case '}':
return d.consumeToken(ObjectClose, 1), nil
case '[':
return d.consumeToken(ArrayOpen, 1), nil
case ']':
return d.consumeToken(ArrayClose, 1), nil
case ',':
return d.consumeToken(comma, 1), nil
}
return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in))
}
// newSyntaxError returns an error with line and column information useful for
// syntax errors.
func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error {
e := errors.New(f, x...)
line, column := d.Position(pos)
return errors.New("syntax error (line %d:%d): %v", line, column, e)
}
// Position returns line and column number of given index of the original input.
// It will panic if index is out of range.
func (d *Decoder) Position(idx int) (line int, column int) {
b := d.orig[:idx]
line = bytes.Count(b, []byte("\n")) + 1
if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
b = b[i+1:]
}
column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
return line, column
}
// currPos returns the current index position of d.in from d.orig.
func (d *Decoder) currPos() int {
return len(d.orig) - len(d.in)
}
// matchWithDelim matches s with the input b and verifies that the match
// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]").
// As a special case, EOF is considered a delimiter. It returns the length of s
// if there is a match, else 0.
func matchWithDelim(s string, b []byte) int {
if !bytes.HasPrefix(b, []byte(s)) {
return 0
}
n := len(s)
if n < len(b) && isNotDelim(b[n]) {
return 0
}
return n
}
// isNotDelim returns true if the given byte is not a delimiter character.
func isNotDelim(c byte) bool {
return (c == '-' || c == '+' || c == '.' || c == '_' ||
('a' <= c && c <= 'z') ||
('A' <= c && c <= 'Z') ||
('0' <= c && c <= '9'))
}
// consume consumes n bytes of input and any subsequent whitespace.
func (d *Decoder) consume(n int) {
d.in = d.in[n:]
for len(d.in) > 0 {
switch d.in[0] {
case ' ', '\n', '\r', '\t':
d.in = d.in[1:]
default:
return
}
}
}
// isValueNext returns true if next type should be a JSON value: Null,
// Number, String or Bool.
func (d *Decoder) isValueNext() bool {
if len(d.openStack) == 0 {
return d.lastToken.kind == 0
}
start := d.openStack[len(d.openStack)-1]
switch start {
case ObjectOpen:
return d.lastToken.kind&Name != 0
case ArrayOpen:
return d.lastToken.kind&(ArrayOpen|comma) != 0
}
panic(fmt.Sprintf(
"unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v",
d.lastToken.kind, start))
}
// consumeToken constructs a Token for given Kind with raw value derived from
// current d.in and given size, and consumes the given size-length of it.
func (d *Decoder) consumeToken(kind Kind, size int) Token {
tok := Token{
kind: kind,
raw: d.in[:size],
pos: len(d.orig) - len(d.in),
}
d.consume(size)
return tok
}
// consumeBoolToken constructs a Token for a Bool kind with raw value derived from
// current d.in and given size.
func (d *Decoder) consumeBoolToken(b bool, size int) Token {
tok := Token{
kind: Bool,
raw: d.in[:size],
pos: len(d.orig) - len(d.in),
boo: b,
}
d.consume(size)
return tok
}
// consumeStringToken constructs a Token for a String kind with raw value derived
// from current d.in and given size.
func (d *Decoder) consumeStringToken(s string, size int) Token {
tok := Token{
kind: String,
raw: d.in[:size],
pos: len(d.orig) - len(d.in),
str: s,
}
d.consume(size)
return tok
}
// Clone returns a copy of the Decoder for use in reading ahead the next JSON
// object, array or other values without affecting current Decoder.
func (d *Decoder) Clone() *Decoder {
ret := *d
ret.openStack = append([]Kind(nil), ret.openStack...)
return &ret
}

View File

@@ -0,0 +1,254 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"strconv"
)
// parseNumber reads the given []byte for a valid JSON number. If it is valid,
// it returns the number of bytes. Parsing logic follows the definition in
// https://tools.ietf.org/html/rfc7159#section-6 and is based on the
// encoding/json.isValidNumber function.
func parseNumber(input []byte) (int, bool) {
var n int
s := input
if len(s) == 0 {
return 0, false
}
// Optional -
if s[0] == '-' {
s = s[1:]
n++
if len(s) == 0 {
return 0, false
}
}
// Digits
switch {
case s[0] == '0':
s = s[1:]
n++
case '1' <= s[0] && s[0] <= '9':
s = s[1:]
n++
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:]
n++
}
default:
return 0, false
}
// . followed by 1 or more digits.
if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
s = s[2:]
n += 2
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:]
n++
}
}
// e or E followed by an optional - or + and
// 1 or more digits.
if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
s = s[1:]
n++
if s[0] == '+' || s[0] == '-' {
s = s[1:]
n++
if len(s) == 0 {
return 0, false
}
}
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:]
n++
}
}
// Check that next byte is a delimiter or it is at the end.
if n < len(input) && isNotDelim(input[n]) {
return 0, false
}
return n, true
}
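// Illustrative behavior of parseNumber, derived from the grammar above (not
// part of the vendored source):
//
//	parseNumber([]byte("-12.5e+3")) // 8, true
//	parseNumber([]byte("0"))        // 1, true
//	parseNumber([]byte("01"))       // 0, false (leading zero)
//	parseNumber([]byte("1."))       // 0, false (no digit after '.')
//	parseNumber([]byte("1e"))       // 0, false (no exponent digits)
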
// numberParts is the result of parsing out a valid JSON number. It contains
// the parts of a number. The parts are used for integer conversion.
type numberParts struct {
neg bool
intp []byte
frac []byte
exp []byte
}
// parseNumberParts constructs numberParts from the given []byte. The logic here
// is similar to parseNumber above, with the difference of having to construct
// numberParts. The slice fields in numberParts are subslices of the input.
func parseNumberParts(input []byte) (numberParts, bool) {
var neg bool
var intp []byte
var frac []byte
var exp []byte
s := input
if len(s) == 0 {
return numberParts{}, false
}
// Optional -
if s[0] == '-' {
neg = true
s = s[1:]
if len(s) == 0 {
return numberParts{}, false
}
}
// Digits
switch {
case s[0] == '0':
// Skip first 0 and no need to store.
s = s[1:]
case '1' <= s[0] && s[0] <= '9':
intp = s
n := 1
s = s[1:]
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:]
n++
}
intp = intp[:n]
default:
return numberParts{}, false
}
// . followed by 1 or more digits.
if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
frac = s[1:]
n := 1
s = s[2:]
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:]
n++
}
frac = frac[:n]
}
// e or E followed by an optional - or + and
// 1 or more digits.
if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
s = s[1:]
exp = s
n := 0
if s[0] == '+' || s[0] == '-' {
s = s[1:]
n++
if len(s) == 0 {
return numberParts{}, false
}
}
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:]
n++
}
exp = exp[:n]
}
return numberParts{
neg: neg,
intp: intp,
frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right.
exp: exp,
}, true
}
// normalizeToIntString returns an integer string in normal form without the
// E-notation for given numberParts. It will return false if it is not an
// integer or if the exponent exceeds the max/min int value.
func normalizeToIntString(n numberParts) (string, bool) {
intpSize := len(n.intp)
fracSize := len(n.frac)
if intpSize == 0 && fracSize == 0 {
return "0", true
}
var exp int
if len(n.exp) > 0 {
i, err := strconv.ParseInt(string(n.exp), 10, 32)
if err != nil {
return "", false
}
exp = int(i)
}
var num []byte
if exp >= 0 {
// For positive E, shift fraction digits into integer part and also pad
// with zeroes as needed.
// If there are more digits in fraction than the E value, then the
// number is not an integer.
if fracSize > exp {
return "", false
}
// Make sure resulting digits are within max value limit to avoid
// unnecessarily constructing a large byte slice that may simply fail
// later on.
const maxDigits = 20 // Max uint64 value has 20 decimal digits.
if intpSize+exp > maxDigits {
return "", false
}
// Set cap to make a copy of integer part when appended.
num = n.intp[:len(n.intp):len(n.intp)]
num = append(num, n.frac...)
for i := 0; i < exp-fracSize; i++ {
num = append(num, '0')
}
} else {
// For negative E, shift digits in integer part out.
// If there are fractions, then the number is not an integer.
if fracSize > 0 {
return "", false
}
// index is where the decimal point will be after adjusting for negative
// exponent.
index := intpSize + exp
if index < 0 {
return "", false
}
num = n.intp
// If any of the digits being shifted to the right of the decimal point
// is non-zero, then the number is not an integer.
for i := index; i < intpSize; i++ {
if num[i] != '0' {
return "", false
}
}
num = num[:index]
}
if n.neg {
return "-" + string(num), true
}
return string(num), true
}
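// Illustrative behavior of normalizeToIntString on parsed number parts (not
// part of the vendored source):
//
//	"1.5e3"   -> "1500", true   (fraction shifted into the integer part)
//	"300e-2"  -> "3", true      (trailing zeros shifted out)
//	"2e-1"    -> "", false      (a non-zero digit would cross the decimal point)
//	"1.25"    -> "", false      (leftover fraction, not an integer)
//	"-4e2"    -> "-400", true
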

View File

@@ -0,0 +1,91 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"strconv"
"unicode"
"unicode/utf16"
"unicode/utf8"
"google.golang.org/protobuf/internal/strs"
)
func (d *Decoder) parseString(in []byte) (string, int, error) {
in0 := in
if len(in) == 0 {
return "", 0, ErrUnexpectedEOF
}
if in[0] != '"' {
return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0])
}
in = in[1:]
i := indexNeedEscapeInBytes(in)
in, out := in[i:], in[:i:i] // set cap to prevent mutations
for len(in) > 0 {
switch r, n := utf8.DecodeRune(in); {
case r == utf8.RuneError && n == 1:
return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string")
case r < ' ':
return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r)
case r == '"':
in = in[1:]
n := len(in0) - len(in)
return string(out), n, nil
case r == '\\':
if len(in) < 2 {
return "", 0, ErrUnexpectedEOF
}
switch r := in[1]; r {
case '"', '\\', '/':
in, out = in[2:], append(out, r)
case 'b':
in, out = in[2:], append(out, '\b')
case 'f':
in, out = in[2:], append(out, '\f')
case 'n':
in, out = in[2:], append(out, '\n')
case 'r':
in, out = in[2:], append(out, '\r')
case 't':
in, out = in[2:], append(out, '\t')
case 'u':
if len(in) < 6 {
return "", 0, ErrUnexpectedEOF
}
v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
if err != nil {
return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
}
in = in[6:]
r := rune(v)
if utf16.IsSurrogate(r) {
if len(in) < 6 {
return "", 0, ErrUnexpectedEOF
}
v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
r = utf16.DecodeRune(r, rune(v))
if in[0] != '\\' || in[1] != 'u' ||
r == unicode.ReplacementChar || err != nil {
return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
}
in = in[6:]
}
out = append(out, string(r)...)
default:
return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2])
}
default:
i := indexNeedEscapeInBytes(in[n:])
in, out = in[n+i:], append(out, in[:n+i]...)
}
}
return "", 0, ErrUnexpectedEOF
}
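// Illustrative behavior of parseString, derived from the escape handling above
// (not part of the vendored source), assuming in-package use:
//
//	d := NewDecoder([]byte(`"a\tb \u00e9 \ud83d\ude00"`))
//	tok, _ := d.Read()
//	tok.ParsedString() // "a\tb é 😀" (with a real tab): \uXXXX escapes are
//	                   // decoded, and the surrogate pair \ud83d\ude00
//	                   // combines into U+1F600.
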
// indexNeedEscapeInBytes returns the index of the character that needs
// escaping. If no characters need escaping, this returns the input length.
func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }

View File

@@ -0,0 +1,192 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"fmt"
"strconv"
)
// Kind represents a token kind expressible in the JSON format.
type Kind uint16
const (
Invalid Kind = (1 << iota) / 2
EOF
Null
Bool
Number
String
Name
ObjectOpen
ObjectClose
ArrayOpen
ArrayClose
// comma is only for parsing in between tokens and
// does not need to be exported.
comma
)
func (k Kind) String() string {
switch k {
case EOF:
return "eof"
case Null:
return "null"
case Bool:
return "bool"
case Number:
return "number"
case String:
return "string"
case ObjectOpen:
return "{"
case ObjectClose:
return "}"
case Name:
return "name"
case ArrayOpen:
return "["
case ArrayClose:
return "]"
case comma:
return ","
}
return "<invalid>"
}
// Token provides a parsed token kind and value.
//
// Values are provided by the different accessor methods. The accessor methods
// Name, Bool, and ParsedString will panic if called on the wrong kind. There
// are different accessor methods for the Number kind for converting to the
// appropriate Go numeric type and those methods have the ok return value.
type Token struct {
// Token kind.
kind Kind
// pos provides the position of the token in the original input.
pos int
// raw bytes of the serialized token.
// This is a subslice into the original input.
raw []byte
// boo is parsed boolean value.
boo bool
// str is parsed string value.
str string
}
// Kind returns the token kind.
func (t Token) Kind() Kind {
return t.kind
}
// RawString returns the read value as a string.
func (t Token) RawString() string {
return string(t.raw)
}
// Pos returns the token position from the input.
func (t Token) Pos() int {
return t.pos
}
// Name returns the object name if token is Name, else it panics.
func (t Token) Name() string {
if t.kind == Name {
return t.str
}
panic(fmt.Sprintf("Token is not a Name: %v", t.RawString()))
}
// Bool returns the bool value if token kind is Bool, else it panics.
func (t Token) Bool() bool {
if t.kind == Bool {
return t.boo
}
panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString()))
}
// ParsedString returns the unquoted, unescaped string value if the token kind
// is String, else it panics.
func (t Token) ParsedString() string {
if t.kind == String {
return t.str
}
panic(fmt.Sprintf("Token is not a String: %v", t.RawString()))
}
// Float returns the floating-point number if token kind is Number.
//
// The floating-point precision is specified by the bitSize parameter: 32 for
// float32 or 64 for float64. If bitSize=32, the result still has type float64,
// but it will be convertible to float32 without changing its value. It will
// return false if the number exceeds the floating point limits for given
// bitSize.
func (t Token) Float(bitSize int) (float64, bool) {
if t.kind != Number {
return 0, false
}
f, err := strconv.ParseFloat(t.RawString(), bitSize)
if err != nil {
return 0, false
}
return f, true
}
// Int returns the signed integer number if token is Number.
//
// The given bitSize specifies the integer type that the result must fit into.
// It returns false if the number is not an integer value or if the result
// exceeds the limits for given bitSize.
func (t Token) Int(bitSize int) (int64, bool) {
s, ok := t.getIntStr()
if !ok {
return 0, false
}
n, err := strconv.ParseInt(s, 10, bitSize)
if err != nil {
return 0, false
}
return n, true
}
// Uint returns the unsigned integer number if token is Number.
//
// The given bitSize specifies the unsigned integer type that the result must
// fit into. It returns false if the number is not an unsigned integer value
// or if the result exceeds the limits for given bitSize.
func (t Token) Uint(bitSize int) (uint64, bool) {
s, ok := t.getIntStr()
if !ok {
return 0, false
}
n, err := strconv.ParseUint(s, 10, bitSize)
if err != nil {
return 0, false
}
return n, true
}
func (t Token) getIntStr() (string, bool) {
if t.kind != Number {
return "", false
}
parts, ok := parseNumberParts(t.raw)
if !ok {
return "", false
}
return normalizeToIntString(parts)
}
// TokenEquals returns true if given Tokens are equal, else false.
func TokenEquals(x, y Token) bool {
return x.kind == y.kind &&
x.pos == y.pos &&
bytes.Equal(x.raw, y.raw) &&
x.boo == y.boo &&
x.str == y.str
}
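// Illustrative accessor behavior, derived from the conversions above (not part
// of the vendored source). For a Number token read from the raw text "1.5e3":
//
//	tok.Float(64) // 1500, true
//	tok.Int(64)   // 1500, true  (E-notation normalized by getIntStr)
//	tok.Uint(64)  // 1500, true
//
// For a Number token read from "1.5", Int and Uint report (0, false) because
// the value is not an integer, while Float still reports (1.5, true).
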

View File

@@ -0,0 +1,278 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"math"
"math/bits"
"strconv"
"strings"
"unicode/utf8"
"google.golang.org/protobuf/internal/detrand"
"google.golang.org/protobuf/internal/errors"
)
// kind represents an encoding type.
type kind uint8
const (
_ kind = (1 << iota) / 2
name
scalar
objectOpen
objectClose
arrayOpen
arrayClose
)
// Encoder provides methods to write out JSON constructs and values. The user is
// responsible for producing valid sequences of JSON constructs and values.
type Encoder struct {
indent string
lastKind kind
indents []byte
out []byte
}
// NewEncoder returns an Encoder.
//
// If indent is a non-empty string, it causes every entry for an Array or Object
// to be preceded by the indent and trailed by a newline.
func NewEncoder(buf []byte, indent string) (*Encoder, error) {
e := &Encoder{
out: buf,
}
if len(indent) > 0 {
if strings.Trim(indent, " \t") != "" {
return nil, errors.New("indent may only be composed of space or tab characters")
}
e.indent = indent
}
return e, nil
}
// Bytes returns the written bytes.
func (e *Encoder) Bytes() []byte {
return e.out
}
// WriteNull writes out the null value.
func (e *Encoder) WriteNull() {
e.prepareNext(scalar)
e.out = append(e.out, "null"...)
}
// WriteBool writes out the given boolean value.
func (e *Encoder) WriteBool(b bool) {
e.prepareNext(scalar)
if b {
e.out = append(e.out, "true"...)
} else {
e.out = append(e.out, "false"...)
}
}
// WriteString writes out the given string as a JSON string value. It returns an
// error if the input string contains invalid UTF-8.
func (e *Encoder) WriteString(s string) error {
e.prepareNext(scalar)
var err error
if e.out, err = appendString(e.out, s); err != nil {
return err
}
return nil
}
// Sentinel error used for indicating invalid UTF-8.
var errInvalidUTF8 = errors.New("invalid UTF-8")
func appendString(out []byte, in string) ([]byte, error) {
out = append(out, '"')
i := indexNeedEscapeInString(in)
in, out = in[i:], append(out, in[:i]...)
for len(in) > 0 {
switch r, n := utf8.DecodeRuneInString(in); {
case r == utf8.RuneError && n == 1:
return out, errInvalidUTF8
case r < ' ' || r == '"' || r == '\\':
out = append(out, '\\')
switch r {
case '"', '\\':
out = append(out, byte(r))
case '\b':
out = append(out, 'b')
case '\f':
out = append(out, 'f')
case '\n':
out = append(out, 'n')
case '\r':
out = append(out, 'r')
case '\t':
out = append(out, 't')
default:
out = append(out, 'u')
out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...)
out = strconv.AppendUint(out, uint64(r), 16)
}
in = in[n:]
default:
i := indexNeedEscapeInString(in[n:])
in, out = in[n+i:], append(out, in[:n+i]...)
}
}
out = append(out, '"')
return out, nil
}
// indexNeedEscapeInString returns the index of the character that needs
// escaping. If no characters need escaping, this returns the input length.
func indexNeedEscapeInString(s string) int {
for i, r := range s {
if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError {
return i
}
}
return len(s)
}
// WriteFloat writes out the given float and bitSize in JSON number value.
func (e *Encoder) WriteFloat(n float64, bitSize int) {
e.prepareNext(scalar)
e.out = appendFloat(e.out, n, bitSize)
}
// appendFloat formats given float in bitSize, and appends to the given []byte.
func appendFloat(out []byte, n float64, bitSize int) []byte {
switch {
case math.IsNaN(n):
return append(out, `"NaN"`...)
case math.IsInf(n, +1):
return append(out, `"Infinity"`...)
case math.IsInf(n, -1):
return append(out, `"-Infinity"`...)
}
// JSON number formatting logic based on encoding/json.
// See floatEncoder.encode for reference.
fmt := byte('f')
if abs := math.Abs(n); abs != 0 {
if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) ||
bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
fmt = 'e'
}
}
out = strconv.AppendFloat(out, n, fmt, -1, bitSize)
if fmt == 'e' {
n := len(out)
if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' {
out[n-2] = out[n-1]
out = out[:n-1]
}
}
return out
}
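// Illustrative special-value output of appendFloat, following the proto3 JSON
// mapping (not part of the vendored source):
//
//	appendFloat(nil, math.NaN(), 64)    // `"NaN"`
//	appendFloat(nil, math.Inf(+1), 64)  // `"Infinity"`
//	appendFloat(nil, math.Inf(-1), 64)  // `"-Infinity"`
//	appendFloat(nil, 0.25, 32)          // `0.25`
//	appendFloat(nil, 1e21, 64)          // `1e+21`
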
// WriteInt writes out the given signed integer in JSON number value.
func (e *Encoder) WriteInt(n int64) {
e.prepareNext(scalar)
e.out = strconv.AppendInt(e.out, n, 10)
}
// WriteUint writes out the given unsigned integer in JSON number value.
func (e *Encoder) WriteUint(n uint64) {
e.prepareNext(scalar)
e.out = strconv.AppendUint(e.out, n, 10)
}
// StartObject writes out the '{' symbol.
func (e *Encoder) StartObject() {
e.prepareNext(objectOpen)
e.out = append(e.out, '{')
}
// EndObject writes out the '}' symbol.
func (e *Encoder) EndObject() {
e.prepareNext(objectClose)
e.out = append(e.out, '}')
}
// WriteName writes out the given string as a JSON string value followed by the
// name separator ':'. It returns an error if the input string contains invalid
// UTF-8, which is unlikely since protobuf field names are expected to be valid.
func (e *Encoder) WriteName(s string) error {
e.prepareNext(name)
var err error
// Append to output regardless of error.
e.out, err = appendString(e.out, s)
e.out = append(e.out, ':')
return err
}
// StartArray writes out the '[' symbol.
func (e *Encoder) StartArray() {
e.prepareNext(arrayOpen)
e.out = append(e.out, '[')
}
// EndArray writes out the ']' symbol.
func (e *Encoder) EndArray() {
e.prepareNext(arrayClose)
e.out = append(e.out, ']')
}
// prepareNext adds possible comma and indentation for the next value based
// on last type and indent option. It also updates lastKind to next.
func (e *Encoder) prepareNext(next kind) {
defer func() {
// Set lastKind to next.
e.lastKind = next
}()
if len(e.indent) == 0 {
// Need to add comma on the following condition.
if e.lastKind&(scalar|objectClose|arrayClose) != 0 &&
next&(name|scalar|objectOpen|arrayOpen) != 0 {
e.out = append(e.out, ',')
// For single-line output, add a random extra space after each
// comma to make output unstable.
if detrand.Bool() {
e.out = append(e.out, ' ')
}
}
return
}
switch {
case e.lastKind&(objectOpen|arrayOpen) != 0:
// If next type is NOT closing, add indent and newline.
if next&(objectClose|arrayClose) == 0 {
e.indents = append(e.indents, e.indent...)
e.out = append(e.out, '\n')
e.out = append(e.out, e.indents...)
}
case e.lastKind&(scalar|objectClose|arrayClose) != 0:
switch {
// If next type is either a value or name, add comma and newline.
case next&(name|scalar|objectOpen|arrayOpen) != 0:
e.out = append(e.out, ',', '\n')
// If next type is a closing object or array, adjust indentation.
case next&(objectClose|arrayClose) != 0:
e.indents = e.indents[:len(e.indents)-len(e.indent)]
e.out = append(e.out, '\n')
}
e.out = append(e.out, e.indents...)
case e.lastKind&name != 0:
e.out = append(e.out, ' ')
// For multi-line output, add a random extra space after key: to make
// output unstable.
if detrand.Bool() {
e.out = append(e.out, ' ')
}
}
}
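// Illustrative sketch (not part of the vendored source): building a document
// with Encoder, assuming in-package use since this json package is internal.
//
//	e, _ := NewEncoder(nil, "  ")
//	e.StartObject()
//	_ = e.WriteName("id")
//	e.WriteInt(42)
//	_ = e.WriteName("tags")
//	e.StartArray()
//	_ = e.WriteString("a")
//	_ = e.WriteString("b")
//	e.EndArray()
//	e.EndObject()
//	// string(e.Bytes()) is, modulo the deliberate detrand whitespace jitter:
//	// {
//	//   "id": 42,
//	//   "tags": [
//	//     "a",
//	//     "b"
//	//   ]
//	// }
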

23
vendor/modules.txt generated vendored
View File

@@ -405,6 +405,7 @@ github.com/golang/groupcache/lru
github.com/golang/mock/gomock
# github.com/golang/protobuf v1.5.3
## explicit; go 1.9
github.com/golang/protobuf/jsonpb
github.com/golang/protobuf/proto
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
@@ -688,7 +689,7 @@ github.com/openshift/oc/pkg/helpers/motd
github.com/openshift/oc/pkg/helpers/project
github.com/openshift/oc/pkg/helpers/term
github.com/openshift/oc/pkg/helpers/tokencmd
# github.com/operator-framework/api v0.17.6
# github.com/operator-framework/api v0.17.7
## explicit; go 1.19
github.com/operator-framework/api/pkg/lib/version
github.com/operator-framework/api/pkg/operators
@@ -797,7 +798,7 @@ github.com/sergi/go-diff/diffmatchpatch
# github.com/sethvargo/go-envconfig v0.9.0
## explicit; go 1.17
github.com/sethvargo/go-envconfig
# github.com/sirupsen/logrus v1.9.0
# github.com/sirupsen/logrus v1.9.2
## explicit; go 1.13
github.com/sirupsen/logrus
# github.com/skeema/knownhosts v1.1.1
@@ -993,22 +994,34 @@ google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20220706185917-7780775163c4
## explicit; go 1.15
# google.golang.org/genproto v0.0.0-20230525154841-bd750badd5c6
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.51.0
# google.golang.org/grpc v1.54.0
## explicit; go 1.17
google.golang.org/grpc/attributes
google.golang.org/grpc/codes
google.golang.org/grpc/connectivity
google.golang.org/grpc/credentials
google.golang.org/grpc/grpclog
google.golang.org/grpc/internal
google.golang.org/grpc/internal/credentials
google.golang.org/grpc/internal/grpclog
google.golang.org/grpc/internal/pretty
google.golang.org/grpc/internal/status
google.golang.org/grpc/resolver
google.golang.org/grpc/serviceconfig
google.golang.org/grpc/status
# google.golang.org/protobuf v1.31.0
## explicit; go 1.11
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire
google.golang.org/protobuf/internal/descfmt
google.golang.org/protobuf/internal/descopts
google.golang.org/protobuf/internal/detrand
google.golang.org/protobuf/internal/encoding/defval
google.golang.org/protobuf/internal/encoding/json
google.golang.org/protobuf/internal/encoding/messageset
google.golang.org/protobuf/internal/encoding/tag
google.golang.org/protobuf/internal/encoding/text