From b451b093ed0cd9812b0de7dabcaf1fa98e80be03 Mon Sep 17 00:00:00 2001 From: shaowenchen Date: Fri, 20 Sep 2024 18:07:21 +0800 Subject: [PATCH] draft: event --- go.mod | 8 +- go.sum | 14 + pkg/event/bus.go | 43 + pkg/event/event.go | 51 + .../sdk-go/protocol/nats/v2/LICENSE | 201 + .../sdk-go/protocol/nats/v2/doc.go | 9 + .../sdk-go/protocol/nats/v2/message.go | 46 + .../sdk-go/protocol/nats/v2/options.go | 52 + .../sdk-go/protocol/nats/v2/protocol.go | 112 + .../sdk-go/protocol/nats/v2/receiver.go | 145 + .../sdk-go/protocol/nats/v2/sender.go | 97 + .../sdk-go/protocol/nats/v2/subscriber.go | 38 + .../sdk-go/protocol/nats/v2/write_message.go | 42 + .../github.com/cloudevents/sdk-go/v2/LICENSE | 201 + .../github.com/cloudevents/sdk-go/v2/alias.go | 187 + .../sdk-go/v2/binding/binary_writer.go | 52 + .../cloudevents/sdk-go/v2/binding/doc.go | 66 + .../cloudevents/sdk-go/v2/binding/encoding.go | 50 + .../sdk-go/v2/binding/event_message.go | 110 + .../sdk-go/v2/binding/finish_message.go | 42 + .../sdk-go/v2/binding/format/doc.go | 12 + .../sdk-go/v2/binding/format/format.go | 105 + .../cloudevents/sdk-go/v2/binding/message.go | 153 + .../sdk-go/v2/binding/spec/attributes.go | 141 + .../cloudevents/sdk-go/v2/binding/spec/doc.go | 12 + .../v2/binding/spec/match_exact_version.go | 81 + .../sdk-go/v2/binding/spec/spec.go | 189 + .../sdk-go/v2/binding/structured_writer.go | 22 + .../cloudevents/sdk-go/v2/binding/to_event.go | 153 + .../sdk-go/v2/binding/transformer.go | 42 + .../cloudevents/sdk-go/v2/binding/write.go | 179 + .../cloudevents/sdk-go/v2/client/client.go | 295 + .../sdk-go/v2/client/client_http.go | 35 + .../sdk-go/v2/client/client_observed.go | 12 + .../sdk-go/v2/client/defaulters.go | 57 + .../cloudevents/sdk-go/v2/client/doc.go | 11 + .../sdk-go/v2/client/http_receiver.go | 45 + .../cloudevents/sdk-go/v2/client/invoker.go | 145 + .../sdk-go/v2/client/observability.go | 54 + .../cloudevents/sdk-go/v2/client/options.go | 141 + .../cloudevents/sdk-go/v2/client/receiver.go | 193 + .../cloudevents/sdk-go/v2/context/context.go | 110 + .../sdk-go/v2/context/delegating.go | 25 + .../cloudevents/sdk-go/v2/context/doc.go | 10 + .../cloudevents/sdk-go/v2/context/logger.go | 48 + .../cloudevents/sdk-go/v2/context/retry.go | 76 + .../sdk-go/v2/event/content_type.go | 47 + .../sdk-go/v2/event/data_content_encoding.go | 16 + .../sdk-go/v2/event/datacodec/codec.go | 78 + .../sdk-go/v2/event/datacodec/doc.go | 10 + .../sdk-go/v2/event/datacodec/json/data.go | 56 + .../sdk-go/v2/event/datacodec/json/doc.go | 9 + .../sdk-go/v2/event/datacodec/text/data.go | 30 + .../sdk-go/v2/event/datacodec/text/doc.go | 9 + .../sdk-go/v2/event/datacodec/xml/data.go | 40 + .../sdk-go/v2/event/datacodec/xml/doc.go | 9 + .../cloudevents/sdk-go/v2/event/doc.go | 9 + .../cloudevents/sdk-go/v2/event/event.go | 125 + .../cloudevents/sdk-go/v2/event/event_data.go | 118 + .../sdk-go/v2/event/event_interface.go | 102 + .../sdk-go/v2/event/event_marshal.go | 203 + .../sdk-go/v2/event/event_reader.go | 103 + .../sdk-go/v2/event/event_unmarshal.go | 480 ++ .../sdk-go/v2/event/event_validation.go | 50 + .../sdk-go/v2/event/event_writer.go | 117 + .../sdk-go/v2/event/eventcontext.go | 125 + .../sdk-go/v2/event/eventcontext_v03.go | 330 + .../v2/event/eventcontext_v03_reader.go | 99 + .../v2/event/eventcontext_v03_writer.go | 103 + .../sdk-go/v2/event/eventcontext_v1.go | 315 + .../sdk-go/v2/event/eventcontext_v1_reader.go | 104 + .../sdk-go/v2/event/eventcontext_v1_writer.go | 97 + .../cloudevents/sdk-go/v2/event/extensions.go | 
57 + .../cloudevents/sdk-go/v2/protocol/doc.go | 25 + .../cloudevents/sdk-go/v2/protocol/error.go | 42 + .../v2/protocol/http/abuse_protection.go | 128 + .../sdk-go/v2/protocol/http/context.go | 48 + .../sdk-go/v2/protocol/http/doc.go | 9 + .../sdk-go/v2/protocol/http/headers.go | 55 + .../sdk-go/v2/protocol/http/message.go | 175 + .../sdk-go/v2/protocol/http/options.go | 300 + .../sdk-go/v2/protocol/http/protocol.go | 411 ++ .../v2/protocol/http/protocol_lifecycle.go | 143 + .../sdk-go/v2/protocol/http/protocol_rate.go | 34 + .../sdk-go/v2/protocol/http/protocol_retry.go | 126 + .../sdk-go/v2/protocol/http/result.go | 60 + .../sdk-go/v2/protocol/http/retries_result.go | 59 + .../sdk-go/v2/protocol/http/utility.go | 89 + .../sdk-go/v2/protocol/http/write_request.go | 142 + .../v2/protocol/http/write_responsewriter.go | 126 + .../cloudevents/sdk-go/v2/protocol/inbound.go | 54 + .../sdk-go/v2/protocol/lifecycle.go | 23 + .../sdk-go/v2/protocol/outbound.go | 49 + .../cloudevents/sdk-go/v2/protocol/result.go | 127 + .../cloudevents/sdk-go/v2/staticcheck.conf | 3 + .../cloudevents/sdk-go/v2/types/allocate.go | 41 + .../cloudevents/sdk-go/v2/types/doc.go | 45 + .../cloudevents/sdk-go/v2/types/timestamp.go | 75 + .../cloudevents/sdk-go/v2/types/uri.go | 86 + .../cloudevents/sdk-go/v2/types/uriref.go | 82 + .../cloudevents/sdk-go/v2/types/value.go | 337 + vendor/github.com/klauspost/compress/LICENSE | 304 + .../klauspost/compress/flate/deflate.go | 1017 +++ .../klauspost/compress/flate/dict_decoder.go | 184 + .../klauspost/compress/flate/fast_encoder.go | 193 + .../compress/flate/huffman_bit_writer.go | 1182 ++++ .../klauspost/compress/flate/huffman_code.go | 417 ++ .../compress/flate/huffman_sortByFreq.go | 159 + .../compress/flate/huffman_sortByLiteral.go | 201 + .../klauspost/compress/flate/inflate.go | 829 +++ .../klauspost/compress/flate/inflate_gen.go | 1283 ++++ .../klauspost/compress/flate/level1.go | 241 + .../klauspost/compress/flate/level2.go | 214 + .../klauspost/compress/flate/level3.go | 241 + .../klauspost/compress/flate/level4.go | 221 + .../klauspost/compress/flate/level5.go | 708 ++ .../klauspost/compress/flate/level6.go | 325 + .../compress/flate/matchlen_amd64.go | 16 + .../klauspost/compress/flate/matchlen_amd64.s | 68 + .../compress/flate/matchlen_generic.go | 33 + .../klauspost/compress/flate/regmask_amd64.go | 37 + .../klauspost/compress/flate/regmask_other.go | 40 + .../klauspost/compress/flate/stateless.go | 318 + .../klauspost/compress/flate/token.go | 379 ++ vendor/github.com/nats-io/nats.go/.gitignore | 45 + .../github.com/nats-io/nats.go/.golangci.yaml | 13 + vendor/github.com/nats-io/nats.go/.travis.yml | 36 + vendor/github.com/nats-io/nats.go/.words | 106 + .../github.com/nats-io/nats.go/.words.readme | 25 + .../nats-io/nats.go/CODE-OF-CONDUCT.md | 3 + .../github.com/nats-io/nats.go/GOVERNANCE.md | 3 + vendor/github.com/nats-io/nats.go/LICENSE | 201 + .../github.com/nats-io/nats.go/MAINTAINERS.md | 8 + vendor/github.com/nats-io/nats.go/README.md | 480 ++ vendor/github.com/nats-io/nats.go/context.go | 244 + .../nats-io/nats.go/dependencies.md | 15 + vendor/github.com/nats-io/nats.go/enc.go | 269 + .../nats.go/encoders/builtin/default_enc.go | 117 + .../nats.go/encoders/builtin/gob_enc.go | 45 + .../nats.go/encoders/builtin/json_enc.go | 56 + vendor/github.com/nats-io/nats.go/go_test.mod | 22 + vendor/github.com/nats-io/nats.go/go_test.sum | 48 + .../nats-io/nats.go/internal/parser/parse.go | 104 + vendor/github.com/nats-io/nats.go/js.go | 3815 +++++++++++ 
vendor/github.com/nats-io/nats.go/jserrors.go | 235 + vendor/github.com/nats-io/nats.go/jsm.go | 1665 +++++ vendor/github.com/nats-io/nats.go/kv.go | 1119 ++++ .../nats-io/nats.go/legacy_jetstream.md | 83 + vendor/github.com/nats-io/nats.go/nats.go | 5688 +++++++++++++++++ vendor/github.com/nats-io/nats.go/netchan.go | 111 + vendor/github.com/nats-io/nats.go/object.go | 1386 ++++ vendor/github.com/nats-io/nats.go/parser.go | 554 ++ vendor/github.com/nats-io/nats.go/rand.go | 29 + .../nats-io/nats.go/testing_internal.go | 59 + vendor/github.com/nats-io/nats.go/timer.go | 56 + vendor/github.com/nats-io/nats.go/util/tls.go | 28 + .../nats-io/nats.go/util/tls_go17.go | 50 + vendor/github.com/nats-io/nats.go/ws.go | 780 +++ vendor/github.com/nats-io/nkeys/.gitignore | 16 + .../github.com/nats-io/nkeys/.goreleaser.yml | 63 + vendor/github.com/nats-io/nkeys/GOVERNANCE.md | 3 + vendor/github.com/nats-io/nkeys/LICENSE | 201 + .../github.com/nats-io/nkeys/MAINTAINERS.md | 8 + vendor/github.com/nats-io/nkeys/README.md | 69 + vendor/github.com/nats-io/nkeys/TODO.md | 5 + vendor/github.com/nats-io/nkeys/crc16.go | 68 + .../github.com/nats-io/nkeys/creds_utils.go | 78 + .../github.com/nats-io/nkeys/dependencies.md | 12 + vendor/github.com/nats-io/nkeys/errors.go | 50 + vendor/github.com/nats-io/nkeys/keypair.go | 146 + vendor/github.com/nats-io/nkeys/nkeys.go | 100 + vendor/github.com/nats-io/nkeys/public.go | 86 + vendor/github.com/nats-io/nkeys/strkey.go | 314 + vendor/github.com/nats-io/nkeys/xkeys.go | 185 + vendor/github.com/nats-io/nuid/.gitignore | 24 + vendor/github.com/nats-io/nuid/.travis.yml | 17 + vendor/github.com/nats-io/nuid/GOVERNANCE.md | 3 + vendor/github.com/nats-io/nuid/LICENSE | 201 + vendor/github.com/nats-io/nuid/MAINTAINERS.md | 6 + vendor/github.com/nats-io/nuid/README.md | 47 + vendor/github.com/nats-io/nuid/nuid.go | 135 + vendor/golang.org/x/crypto/blake2b/blake2b.go | 291 + .../x/crypto/blake2b/blake2bAVX2_amd64.go | 37 + .../x/crypto/blake2b/blake2bAVX2_amd64.s | 744 +++ .../x/crypto/blake2b/blake2b_amd64.s | 278 + .../x/crypto/blake2b/blake2b_generic.go | 182 + .../x/crypto/blake2b/blake2b_ref.go | 11 + vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 + .../golang.org/x/crypto/blake2b/register.go | 30 + vendor/golang.org/x/crypto/ed25519/ed25519.go | 71 + vendor/golang.org/x/crypto/nacl/box/box.go | 182 + .../x/crypto/nacl/secretbox/secretbox.go | 173 + .../x/crypto/salsa20/salsa/hsalsa20.go | 146 + .../x/crypto/salsa20/salsa/salsa208.go | 201 + .../x/crypto/salsa20/salsa/salsa20_amd64.go | 23 + .../x/crypto/salsa20/salsa/salsa20_amd64.s | 880 +++ .../x/crypto/salsa20/salsa/salsa20_noasm.go | 14 + .../x/crypto/salsa20/salsa/salsa20_ref.go | 233 + .../golang.org/x/crypto/sha3/keccakf_amd64.s | 4 +- vendor/golang.org/x/crypto/ssh/channel.go | 28 +- vendor/golang.org/x/crypto/ssh/client.go | 2 +- vendor/golang.org/x/crypto/ssh/handshake.go | 56 +- vendor/golang.org/x/crypto/ssh/server.go | 2 + vendor/golang.org/x/crypto/ssh/transport.go | 32 +- vendor/modules.txt | 41 +- 205 files changed, 41408 insertions(+), 22 deletions(-) create mode 100644 pkg/event/bus.go create mode 100644 pkg/event/event.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/LICENSE create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/options.go create mode 100644 
vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/protocol.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/receiver.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/sender.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/subscriber.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/write_message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/LICENSE create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/alias.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/write.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/client.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/observability.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/options.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/context.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/logger.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/retry.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go 
create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go 
create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/uri.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/value.go create mode 100644 vendor/github.com/klauspost/compress/LICENSE create mode 100644 vendor/github.com/klauspost/compress/flate/deflate.go create mode 100644 vendor/github.com/klauspost/compress/flate/dict_decoder.go create mode 100644 vendor/github.com/klauspost/compress/flate/fast_encoder.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_code.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go create mode 100644 vendor/github.com/klauspost/compress/flate/inflate.go create mode 100644 vendor/github.com/klauspost/compress/flate/inflate_gen.go create mode 100644 vendor/github.com/klauspost/compress/flate/level1.go create mode 100644 vendor/github.com/klauspost/compress/flate/level2.go create mode 100644 vendor/github.com/klauspost/compress/flate/level3.go create mode 100644 vendor/github.com/klauspost/compress/flate/level4.go create mode 100644 vendor/github.com/klauspost/compress/flate/level5.go create mode 100644 vendor/github.com/klauspost/compress/flate/level6.go create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.go create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.s create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_generic.go create mode 100644 vendor/github.com/klauspost/compress/flate/regmask_amd64.go create mode 100644 vendor/github.com/klauspost/compress/flate/regmask_other.go create mode 100644 vendor/github.com/klauspost/compress/flate/stateless.go create mode 100644 vendor/github.com/klauspost/compress/flate/token.go create mode 100644 vendor/github.com/nats-io/nats.go/.gitignore create mode 100644 vendor/github.com/nats-io/nats.go/.golangci.yaml create mode 100644 vendor/github.com/nats-io/nats.go/.travis.yml create mode 100644 vendor/github.com/nats-io/nats.go/.words create mode 100644 vendor/github.com/nats-io/nats.go/.words.readme create mode 100644 vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md create mode 100644 vendor/github.com/nats-io/nats.go/GOVERNANCE.md create mode 100644 vendor/github.com/nats-io/nats.go/LICENSE create mode 100644 vendor/github.com/nats-io/nats.go/MAINTAINERS.md create mode 100644 vendor/github.com/nats-io/nats.go/README.md create mode 100644 vendor/github.com/nats-io/nats.go/context.go create mode 100644 vendor/github.com/nats-io/nats.go/dependencies.md create mode 100644 vendor/github.com/nats-io/nats.go/enc.go create mode 100644 vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go create mode 100644 vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go create mode 100644 vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go create mode 100644 vendor/github.com/nats-io/nats.go/go_test.mod create mode 100644 vendor/github.com/nats-io/nats.go/go_test.sum create mode 100644 
vendor/github.com/nats-io/nats.go/internal/parser/parse.go create mode 100644 vendor/github.com/nats-io/nats.go/js.go create mode 100644 vendor/github.com/nats-io/nats.go/jserrors.go create mode 100644 vendor/github.com/nats-io/nats.go/jsm.go create mode 100644 vendor/github.com/nats-io/nats.go/kv.go create mode 100644 vendor/github.com/nats-io/nats.go/legacy_jetstream.md create mode 100644 vendor/github.com/nats-io/nats.go/nats.go create mode 100644 vendor/github.com/nats-io/nats.go/netchan.go create mode 100644 vendor/github.com/nats-io/nats.go/object.go create mode 100644 vendor/github.com/nats-io/nats.go/parser.go create mode 100644 vendor/github.com/nats-io/nats.go/rand.go create mode 100644 vendor/github.com/nats-io/nats.go/testing_internal.go create mode 100644 vendor/github.com/nats-io/nats.go/timer.go create mode 100644 vendor/github.com/nats-io/nats.go/util/tls.go create mode 100644 vendor/github.com/nats-io/nats.go/util/tls_go17.go create mode 100644 vendor/github.com/nats-io/nats.go/ws.go create mode 100644 vendor/github.com/nats-io/nkeys/.gitignore create mode 100644 vendor/github.com/nats-io/nkeys/.goreleaser.yml create mode 100644 vendor/github.com/nats-io/nkeys/GOVERNANCE.md create mode 100644 vendor/github.com/nats-io/nkeys/LICENSE create mode 100644 vendor/github.com/nats-io/nkeys/MAINTAINERS.md create mode 100644 vendor/github.com/nats-io/nkeys/README.md create mode 100644 vendor/github.com/nats-io/nkeys/TODO.md create mode 100644 vendor/github.com/nats-io/nkeys/crc16.go create mode 100644 vendor/github.com/nats-io/nkeys/creds_utils.go create mode 100644 vendor/github.com/nats-io/nkeys/dependencies.md create mode 100644 vendor/github.com/nats-io/nkeys/errors.go create mode 100644 vendor/github.com/nats-io/nkeys/keypair.go create mode 100644 vendor/github.com/nats-io/nkeys/nkeys.go create mode 100644 vendor/github.com/nats-io/nkeys/public.go create mode 100644 vendor/github.com/nats-io/nkeys/strkey.go create mode 100644 vendor/github.com/nats-io/nkeys/xkeys.go create mode 100644 vendor/github.com/nats-io/nuid/.gitignore create mode 100644 vendor/github.com/nats-io/nuid/.travis.yml create mode 100644 vendor/github.com/nats-io/nuid/GOVERNANCE.md create mode 100644 vendor/github.com/nats-io/nuid/LICENSE create mode 100644 vendor/github.com/nats-io/nuid/MAINTAINERS.md create mode 100644 vendor/github.com/nats-io/nuid/README.md create mode 100644 vendor/github.com/nats-io/nuid/nuid.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go create mode 100644 vendor/golang.org/x/crypto/blake2b/register.go create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go create mode 100644 vendor/golang.org/x/crypto/nacl/box/box.go create mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s create mode 100644 
vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go
 create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go

diff --git a/go.mod b/go.mod
index f889cd31..70329965 100644
--- a/go.mod
+++ b/go.mod
@@ -15,7 +15,7 @@ require (
 	github.com/spf13/cobra v1.6.0
 	github.com/spf13/pflag v1.0.5
 	github.com/spf13/viper v1.18.2
-	golang.org/x/crypto v0.16.0
+	golang.org/x/crypto v0.17.0
 	golang.org/x/term v0.15.0
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
@@ -28,6 +28,8 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/cloudevents/sdk-go/protocol/nats/v2 v2.15.2 // indirect
+	github.com/cloudevents/sdk-go/v2 v2.15.2 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/emicklei/go-restful/v3 v3.10.1 // indirect
 	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
@@ -54,6 +56,7 @@ require (
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.17.2 // indirect
 	github.com/leodido/go-urn v1.2.1 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
@@ -63,6 +66,9 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/nats-io/nats.go v1.31.0 // indirect
+	github.com/nats-io/nkeys v0.4.6 // indirect
+	github.com/nats-io/nuid v1.0.1 // indirect
 	github.com/pelletier/go-toml/v2 v2.1.0 // indirect
 	github.com/prometheus/client_golang v1.14.0 // indirect
 	github.com/prometheus/client_model v0.3.0 // indirect
diff --git a/go.sum b/go.sum
index b3cfc85f..a4043270 100644
--- a/go.sum
+++ b/go.sum
@@ -17,6 +17,10 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudevents/sdk-go/protocol/nats/v2 v2.15.2 h1:grQPId+rXCeR5RcmK5uBlissnlot7kBlHd8YJ7iZOPg=
+github.com/cloudevents/sdk-go/protocol/nats/v2 v2.15.2/go.mod h1:KQA5rf2uSgtCnXsAFyFXtwiDboL/pB6gsg4VTErhfLA=
+github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc=
+github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
@@ -127,6 +131,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
+github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -155,6 +161,12 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/nats-io/nats.go v1.31.0 h1:/WFBHEc/dOKBF6qf1TZhrdEfTmOZ5JzdJ+Y3m6Y/p7E=
+github.com/nats-io/nats.go v1.31.0/go.mod h1:di3Bm5MLsoB4Bx61CBTsxuarI36WbhAwOm8QrW39+i8=
+github.com/nats-io/nkeys v0.4.6 h1:IzVe95ru2CT6ta874rt9saQRkWfe2nFj1NtvYSLqMzY=
+github.com/nats-io/nkeys v0.4.6/go.mod h1:4DxZNzenSVd1cYQoAa8948QY3QDjrHfcfVADymtkpts=
+github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
 github.com/onsi/ginkgo/v2 v2.6.0 h1:9t9b9vRUbFq3C4qKFCGkVuq/fIHji802N1nrtkh1mNc=
 github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc=
 github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E=
@@ -247,6 +259,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
 golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
 golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
diff --git a/pkg/event/bus.go b/pkg/event/bus.go
new file mode 100644
index 00000000..1612ca97
--- /dev/null
+++ b/pkg/event/bus.go
@@ -0,0 +1,47 @@
+package event
+
+import (
+	"context"
+	"fmt"
+
+	cenats "github.com/cloudevents/sdk-go/protocol/nats/v2"
+	cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type EventBus struct {
+	client     cloudevents.Client
+	NatsServer string
+	Subject    string
+}
+
+// NewEventBus connects to the given NATS server and returns a bus that can
+// both publish and subscribe on the given subject.
+func NewEventBus(natsServer string, subject string) (*EventBus, error) {
+	// Use the NATS protocol (sender plus consumer) so the same client can
+	// Publish and Subscribe. Closing the connection here would tear it
+	// down before the bus is ever used.
+	p, err := cenats.NewProtocol(natsServer, subject, subject, cenats.NatsOptions())
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := cloudevents.NewClient(p)
+	if err != nil {
+		p.Close(context.Background())
+		return nil, err
+	}
+	return &EventBus{client: c, NatsServer: natsServer, Subject: subject}, nil
+}
+
+func (bus *EventBus) Publish(ctx context.Context, event cloudevents.Event) error {
+	result := bus.client.Send(ctx, event)
+	if cloudevents.IsUndelivered(result) {
+		return fmt.Errorf("failed to publish: %w", result)
+	}
+	return nil
+}
+
+// Subscribe blocks until ctx is cancelled or the receiver stops.
+func (bus *EventBus) Subscribe(ctx context.Context, handler func(ctx context.Context, event cloudevents.Event)) error {
+	return bus.client.StartReceiver(ctx, handler)
+}
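For reference, a minimal usage sketch of the bus above (not part of this patch). It assumes a NATS server at nats://127.0.0.1:4222, a subject named "ops-events", an import path of github.com/shaowenchen/ops/pkg/event, and illustrative EventInspection field values; none of these are confirmed by the diff.

package main

import (
	"context"
	"log"

	cloudevents "github.com/cloudevents/sdk-go/v2"

	// Import path assumed; substitute the module's real path for pkg/event.
	"github.com/shaowenchen/ops/pkg/event"
)

func main() {
	bus, err := event.NewEventBus("nats://127.0.0.1:4222", "ops-events")
	if err != nil {
		log.Fatal(err)
	}

	// Build and publish an inspection event.
	e, err := event.BuildEvent(event.EventInspection{NameRef: "node-cpu", NodeName: "worker-1", Status: "alerting"})
	if err != nil {
		log.Fatal(err)
	}
	if err := bus.Publish(context.Background(), e); err != nil {
		log.Fatal(err)
	}

	// Subscribe blocks, so run it under a cancellable context
	// (or in its own goroutine) and log every event that arrives.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if err := bus.Subscribe(ctx, func(ctx context.Context, e cloudevents.Event) {
		log.Printf("received %s: %s", e.Type(), string(e.Data()))
	}); err != nil {
		log.Fatal(err)
	}
}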
diff --git a/pkg/event/event.go b/pkg/event/event.go
new file mode 100644
index 00000000..fc6e93af
--- /dev/null
+++ b/pkg/event/event.go
@@ -0,0 +1,56 @@
+package event
+
+import (
+	"fmt"
+
+	cloudevents "github.com/cloudevents/sdk-go/v2"
+	"github.com/google/uuid"
+)
+
+type EventPipelineRun struct {
+	Ref       string `json:"ref"`
+	Desc      string `json:"desc"`
+	Variables string `json:"variables"`
+}
+
+type EventTaskRun struct {
+	Ref       string `json:"ref"`
+	Desc      string `json:"desc"`
+	Variables string `json:"variables"`
+}
+
+type EventInspection struct {
+	TypeRef        string `json:"typeRef"`
+	NameRef        string `json:"nameRef"`
+	NodeName       string `json:"nodeName"`
+	Variables      string `json:"variables"`
+	ThresholdValue string `json:"thresholdValue"`
+	Comparator     string `json:"comparator"`
+	CurrentValue   string `json:"currentValue"`
+	Status         string `json:"status"`
+	Priority       string `json:"priority"`
+}
+
+// BuildEvent wraps the given payload in a CloudEvent with a generated ID
+// and an event type derived from the payload's Go type.
+func BuildEvent(data interface{}) (cloudevents.Event, error) {
+	e := cloudevents.NewEvent()
+	e.SetID(uuid.New().String())
+	e.SetSource("https://www.chenshaowen.com/ops/")
+
+	var eventType string
+	// Event types follow the "ops.<kind>" pattern used for inspections; the pipeline/task values are assumed.
+	switch data.(type) {
+	case EventPipelineRun, *EventPipelineRun:
+		eventType = "ops.pipelinerun"
+	case EventTaskRun, *EventTaskRun:
+		eventType = "ops.taskrun"
+	case EventInspection, *EventInspection:
+		eventType = "ops.inspection"
+	default:
+		return e, fmt.Errorf("unsupported data type: %T", data)
+	}
+	e.SetType(eventType)
+	err := e.SetData("application/json", data)
+	return e, err
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/doc.go b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/doc.go
new file mode 100644
index 00000000..f0d9887b
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/doc.go
@@ -0,0 +1,9 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+/*
+Package nats implements the CloudEvent transport implementation using NATS.
+*/ +package nats diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/message.go b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/message.go new file mode 100644 index 00000000..63e4b861 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/message.go @@ -0,0 +1,46 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package nats + +import ( + "bytes" + "context" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/nats-io/nats.go" +) + +// Message implements binding.Message by wrapping an *nats.Msg. +// This message *can* be read several times safely +type Message struct { + Msg *nats.Msg + encoding binding.Encoding +} + +// NewMessage wraps an *nats.Msg in a binding.Message. +// The returned message *can* be read several times safely +func NewMessage(msg *nats.Msg) *Message { + return &Message{Msg: msg, encoding: binding.EncodingStructured} +} + +var _ binding.Message = (*Message)(nil) + +func (m *Message) ReadEncoding() binding.Encoding { + return m.encoding +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + return encoder.SetStructuredEvent(ctx, format.JSON, bytes.NewReader(m.Msg.Data)) +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) error { + return binding.ErrNotBinary +} + +func (m *Message) Finish(err error) error { + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/options.go b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/options.go new file mode 100644 index 00000000..e3acf739 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/options.go @@ -0,0 +1,52 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package nats + +import ( + "errors" + + "github.com/nats-io/nats.go" +) + +var ErrInvalidQueueName = errors.New("invalid queue name for QueueSubscriber") + +// NatsOptions is a helper function to group a variadic nats.ProtocolOption into +// []nats.Option that can be used by either Sender, Consumer or Protocol +func NatsOptions(opts ...nats.Option) []nats.Option { + return opts +} + +// ProtocolOption is the function signature required to be considered an nats.ProtocolOption. 
+type ProtocolOption func(*Protocol) error + +func WithConsumerOptions(opts ...ConsumerOption) ProtocolOption { + return func(p *Protocol) error { + p.consumerOptions = opts + return nil + } +} + +func WithSenderOptions(opts ...SenderOption) ProtocolOption { + return func(p *Protocol) error { + p.senderOptions = opts + return nil + } +} + +type SenderOption func(*Sender) error + +type ConsumerOption func(*Consumer) error + +// WithQueueSubscriber configures the Consumer to join a queue group when subscribing +func WithQueueSubscriber(queue string) ConsumerOption { + return func(c *Consumer) error { + if queue == "" { + return ErrInvalidQueueName + } + c.Subscriber = &QueueSubscriber{Queue: queue} + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/protocol.go b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/protocol.go new file mode 100644 index 00000000..9f559bb7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/protocol.go @@ -0,0 +1,112 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package nats + +import ( + "context" + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/protocol" + + "github.com/nats-io/nats.go" +) + +// Protocol is a reference implementation for using the CloudEvents binding +// integration. Protocol acts as both a NATS client and a NATS handler. +type Protocol struct { + Conn *nats.Conn + + Consumer *Consumer + consumerOptions []ConsumerOption + + Sender *Sender + senderOptions []SenderOption + + connOwned bool // whether this protocol created the nats connection +} + +// NewProtocol creates a new NATS protocol. +func NewProtocol(url, sendSubject, receiveSubject string, natsOpts []nats.Option, opts ...ProtocolOption) (*Protocol, error) { + conn, err := nats.Connect(url, natsOpts...) + if err != nil { + return nil, err + } + + p, err := NewProtocolFromConn(conn, sendSubject, receiveSubject, opts...) + if err != nil { + conn.Close() + return nil, err + } + + p.connOwned = true + + return p, nil +} + +func NewProtocolFromConn(conn *nats.Conn, sendSubject, receiveSubject string, opts ...ProtocolOption) (*Protocol, error) { + var err error + p := &Protocol{ + Conn: conn, + } + + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + if p.Consumer, err = NewConsumerFromConn(conn, receiveSubject, p.consumerOptions...); err != nil { + return nil, err + } + + if p.Sender, err = NewSenderFromConn(conn, sendSubject, p.senderOptions...); err != nil { + return nil, err + } + + return p, nil +} + +// Send implements Sender.Send +func (p *Protocol) Send(ctx context.Context, in binding.Message, transformers ...binding.Transformer) error { + return p.Sender.Send(ctx, in, transformers...) 
+} + +func (p *Protocol) OpenInbound(ctx context.Context) error { + return p.Consumer.OpenInbound(ctx) +} + +// Receive implements Receiver.Receive +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + return p.Consumer.Receive(ctx) +} + +// Close implements Closer.Close +func (p *Protocol) Close(ctx context.Context) error { + if p.connOwned { + defer p.Conn.Close() + } + + if err := p.Consumer.Close(ctx); err != nil { + return err + } + + if err := p.Sender.Close(ctx); err != nil { + return err + } + + return nil +} + +func (p *Protocol) applyOptions(opts ...ProtocolOption) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +var _ protocol.Receiver = (*Protocol)(nil) +var _ protocol.Sender = (*Protocol)(nil) +var _ protocol.Opener = (*Protocol)(nil) +var _ protocol.Closer = (*Protocol)(nil) diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/receiver.go b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/receiver.go new file mode 100644 index 00000000..b7c73887 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/receiver.go @@ -0,0 +1,145 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package nats + +import ( + "context" + "io" + "sync" + + "github.com/nats-io/nats.go" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +type msgErr struct { + msg binding.Message +} + +type Receiver struct { + incoming chan msgErr +} + +func NewReceiver() *Receiver { + return &Receiver{ + incoming: make(chan msgErr), + } +} + +// MsgHandler implements nats.MsgHandler and publishes messages onto our internal incoming channel to be delivered +// via r.Receive(ctx) +func (r *Receiver) MsgHandler(msg *nats.Msg) { + r.incoming <- msgErr{msg: NewMessage(msg)} +} + +func (r *Receiver) Receive(ctx context.Context) (binding.Message, error) { + select { + case msgErr, ok := <-r.incoming: + if !ok { + return nil, io.EOF + } + return msgErr.msg, nil + case <-ctx.Done(): + return nil, io.EOF + } +} + +type Consumer struct { + Receiver + + Conn *nats.Conn + Subject string + Subscriber Subscriber + + subMtx sync.Mutex + internalClose chan struct{} + connOwned bool +} + +func NewConsumer(url, subject string, natsOpts []nats.Option, opts ...ConsumerOption) (*Consumer, error) { + conn, err := nats.Connect(url, natsOpts...) + if err != nil { + return nil, err + } + + c, err := NewConsumerFromConn(conn, subject, opts...) + if err != nil { + conn.Close() + return nil, err + } + + c.connOwned = true + + return c, err +} + +func NewConsumerFromConn(conn *nats.Conn, subject string, opts ...ConsumerOption) (*Consumer, error) { + c := &Consumer{ + Receiver: *NewReceiver(), + Conn: conn, + Subject: subject, + Subscriber: &RegularSubscriber{}, + internalClose: make(chan struct{}, 1), + } + + err := c.applyOptions(opts...) 
+ if err != nil { + return nil, err + } + + return c, nil +} + +func (c *Consumer) OpenInbound(ctx context.Context) error { + c.subMtx.Lock() + defer c.subMtx.Unlock() + + // Subscribe + sub, err := c.Subscriber.Subscribe(c.Conn, c.Subject, c.MsgHandler) + if err != nil { + return err + } + + // Wait until external or internal context done + select { + case <-ctx.Done(): + case <-c.internalClose: + } + + // Finish to consume messages in the queue and close the subscription + return sub.Drain() +} + +func (c *Consumer) Close(ctx context.Context) error { + // Before closing, let's be sure OpenInbound completes + // We send a signal to close and then we lock on subMtx in order + // to wait OpenInbound to finish draining the queue + c.internalClose <- struct{}{} + c.subMtx.Lock() + defer c.subMtx.Unlock() + + if c.connOwned { + c.Conn.Close() + } + + close(c.internalClose) + + return nil +} + +func (c *Consumer) applyOptions(opts ...ConsumerOption) error { + for _, fn := range opts { + if err := fn(c); err != nil { + return err + } + } + return nil +} + +var _ protocol.Opener = (*Consumer)(nil) +var _ protocol.Receiver = (*Consumer)(nil) +var _ protocol.Closer = (*Consumer)(nil) diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/sender.go b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/sender.go new file mode 100644 index 00000000..55020720 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/sender.go @@ -0,0 +1,97 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package nats + +import ( + "bytes" + "context" + "fmt" + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/protocol" + + "github.com/nats-io/nats.go" +) + +type Sender struct { + Conn *nats.Conn + Subject string + + connOwned bool +} + +// NewSender creates a new protocol.Sender responsible for opening and closing the NATS connection +func NewSender(url, subject string, natsOpts []nats.Option, opts ...SenderOption) (*Sender, error) { + conn, err := nats.Connect(url, natsOpts...) + if err != nil { + return nil, err + } + + s, err := NewSenderFromConn(conn, subject, opts...) + if err != nil { + conn.Close() + return nil, err + } + + s.connOwned = true + + return s, nil +} + +// NewSenderFromConn creates a new protocol.Sender which leaves responsibility for opening and closing the NATS +// connection to the caller +func NewSenderFromConn(conn *nats.Conn, subject string, opts ...SenderOption) (*Sender, error) { + s := &Sender{ + Conn: conn, + Subject: subject, + } + + err := s.applyOptions(opts...) 
+ if err != nil { + return nil, err + } + + return s, nil +} + +func (s *Sender) Send(ctx context.Context, in binding.Message, transformers ...binding.Transformer) (err error) { + defer func() { + if err2 := in.Finish(err); err2 != nil { + if err == nil { + err = err2 + } else { + err = fmt.Errorf("failed to call in.Finish() when error already occurred: %s: %w", err2.Error(), err) + } + } + }() + + writer := new(bytes.Buffer) + if err = WriteMsg(ctx, in, writer, transformers...); err != nil { + return err + } + return s.Conn.Publish(s.Subject, writer.Bytes()) +} + +// Close implements Closer.Close +// This method only closes the connection if the Sender opened it +func (s *Sender) Close(_ context.Context) error { + if s.connOwned { + s.Conn.Close() + } + + return nil +} + +func (s *Sender) applyOptions(opts ...SenderOption) error { + for _, fn := range opts { + if err := fn(s); err != nil { + return err + } + } + return nil +} + +var _ protocol.Sender = (*Sender)(nil) +var _ protocol.Closer = (*Protocol)(nil) diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/subscriber.go b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/subscriber.go new file mode 100644 index 00000000..a644173c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/subscriber.go @@ -0,0 +1,38 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package nats + +import ( + "github.com/nats-io/nats.go" +) + +// The Subscriber interface allows us to configure how the subscription is created +type Subscriber interface { + Subscribe(conn *nats.Conn, subject string, cb nats.MsgHandler) (*nats.Subscription, error) +} + +// RegularSubscriber creates regular subscriptions +type RegularSubscriber struct { +} + +// Subscribe implements Subscriber.Subscribe +func (s *RegularSubscriber) Subscribe(conn *nats.Conn, subject string, cb nats.MsgHandler) (*nats.Subscription, error) { + return conn.Subscribe(subject, cb) +} + +var _ Subscriber = (*RegularSubscriber)(nil) + +// QueueSubscriber creates queue subscriptions +type QueueSubscriber struct { + Queue string +} + +// Subscribe implements Subscriber.Subscribe +func (s *QueueSubscriber) Subscribe(conn *nats.Conn, subject string, cb nats.MsgHandler) (*nats.Subscription, error) { + return conn.QueueSubscribe(subject, s.Queue, cb) +} + +var _ Subscriber = (*QueueSubscriber)(nil) diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/write_message.go b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/write_message.go new file mode 100644 index 00000000..13c57f38 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/nats/v2/write_message.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package nats + +import ( + "context" + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "io" +) + +// WriteMsg fills the provided writer with the bindings.Message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). 
+func WriteMsg(ctx context.Context, m binding.Message, writer io.ReaderFrom, transformers ...binding.Transformer) error { + structuredWriter := &natsMessageWriter{writer} + + _, err := binding.Write( + ctx, + m, + structuredWriter, + nil, + transformers..., + ) + return err +} + +type natsMessageWriter struct { + io.ReaderFrom +} + +func (w *natsMessageWriter) SetStructuredEvent(_ context.Context, _ format.Format, event io.Reader) error { + if _, err := w.ReadFrom(event); err != nil { + return err + } + + return nil +} + +var _ binding.StructuredWriter = (*natsMessageWriter)(nil) // Test it conforms to the interface diff --git a/vendor/github.com/cloudevents/sdk-go/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/v2/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/alias.go b/vendor/github.com/cloudevents/sdk-go/v2/alias.go new file mode 100644 index 00000000..2fbfaa9a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/alias.go @@ -0,0 +1,187 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +// Package v2 reexports a subset of the SDK v2 API. +package v2 + +// Package cloudevents alias' common functions and types to improve discoverability and reduce +// the number of imports for simple HTTP clients. + +import ( + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/client" + "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" + "github.com/cloudevents/sdk-go/v2/protocol/http" + "github.com/cloudevents/sdk-go/v2/types" +) + +// Client + +type ClientOption = client.Option +type Client = client.Client + +// Event + +type Event = event.Event +type Result = protocol.Result + +// Context + +type EventContext = event.EventContext +type EventContextV1 = event.EventContextV1 +type EventContextV03 = event.EventContextV03 + +// Custom Types + +type Timestamp = types.Timestamp +type URIRef = types.URIRef + +// HTTP Protocol + +type HTTPOption = http.Option + +type HTTPProtocol = http.Protocol + +// Encoding + +type Encoding = binding.Encoding + +// Message + +type Message = binding.Message + +const ( + // ReadEncoding + + ApplicationXML = event.ApplicationXML + ApplicationJSON = event.ApplicationJSON + TextPlain = event.TextPlain + ApplicationCloudEventsJSON = event.ApplicationCloudEventsJSON + ApplicationCloudEventsBatchJSON = event.ApplicationCloudEventsBatchJSON + Base64 = event.Base64 + + // Event Versions + + VersionV1 = event.CloudEventsVersionV1 + VersionV03 = event.CloudEventsVersionV03 + + // Encoding + + EncodingBinary = binding.EncodingBinary + EncodingStructured = binding.EncodingStructured +) + +var ( + + // ContentType Helpers + + StringOfApplicationJSON = event.StringOfApplicationJSON + StringOfApplicationXML = event.StringOfApplicationXML + StringOfTextPlain = event.StringOfTextPlain + StringOfApplicationCloudEventsJSON = event.StringOfApplicationCloudEventsJSON + StringOfApplicationCloudEventsBatchJSON = event.StringOfApplicationCloudEventsBatchJSON + StringOfBase64 = 
event.StringOfBase64 + + // Client Creation + + NewClient = client.New + NewClientHTTP = client.NewHTTP + // Deprecated: please use New with the observability options. + NewClientObserved = client.NewObserved + // Deprecated: Please use NewClientHTTP with the observability options. + NewDefaultClient = client.NewDefault + NewHTTPReceiveHandler = client.NewHTTPReceiveHandler + + // Client Options + + WithEventDefaulter = client.WithEventDefaulter + WithUUIDs = client.WithUUIDs + WithTimeNow = client.WithTimeNow + // Deprecated: this is now noop and will be removed in future releases. + WithTracePropagation = client.WithTracePropagation() + + // Event Creation + + NewEvent = event.New + + // Results + + NewResult = protocol.NewResult + ResultIs = protocol.ResultIs + ResultAs = protocol.ResultAs + + // Receipt helpers + + NewReceipt = protocol.NewReceipt + + ResultACK = protocol.ResultACK + ResultNACK = protocol.ResultNACK + + IsACK = protocol.IsACK + IsNACK = protocol.IsNACK + IsUndelivered = protocol.IsUndelivered + + // HTTP Results + + NewHTTPResult = http.NewResult + NewHTTPRetriesResult = http.NewRetriesResult + + // Message Creation + + ToMessage = binding.ToMessage + + // Event Creation + + NewEventFromHTTPRequest = http.NewEventFromHTTPRequest + NewEventFromHTTPResponse = http.NewEventFromHTTPResponse + NewEventsFromHTTPRequest = http.NewEventsFromHTTPRequest + NewEventsFromHTTPResponse = http.NewEventsFromHTTPResponse + NewHTTPRequestFromEvent = http.NewHTTPRequestFromEvent + NewHTTPRequestFromEvents = http.NewHTTPRequestFromEvents + IsHTTPBatch = http.IsHTTPBatch + + // HTTP Messages + + WriteHTTPRequest = http.WriteRequest + + // Context + + ContextWithTarget = context.WithTarget + TargetFromContext = context.TargetFrom + ContextWithRetriesConstantBackoff = context.WithRetriesConstantBackoff + ContextWithRetriesLinearBackoff = context.WithRetriesLinearBackoff + ContextWithRetriesExponentialBackoff = context.WithRetriesExponentialBackoff + + WithEncodingBinary = binding.WithForceBinary + WithEncodingStructured = binding.WithForceStructured + + // Custom Types + + ParseTimestamp = types.ParseTimestamp + ParseURIRef = types.ParseURIRef + ParseURI = types.ParseURI + + // HTTP Protocol + + NewHTTP = http.New + + // HTTP Protocol Options + + WithTarget = http.WithTarget + WithHeader = http.WithHeader + WithShutdownTimeout = http.WithShutdownTimeout + //WithEncoding = http.WithEncoding + //WithStructuredEncoding = http.WithStructuredEncoding // TODO: expose new way + WithPort = http.WithPort + WithPath = http.WithPath + WithMiddleware = http.WithMiddleware + WithListener = http.WithListener + WithRoundTripper = http.WithRoundTripper + WithGetHandlerFunc = http.WithGetHandlerFunc + WithOptionsHandlerFunc = http.WithOptionsHandlerFunc + WithDefaultOptionsHandlerFunc = http.WithDefaultOptionsHandlerFunc +) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go new file mode 100644 index 00000000..97f2c4dd --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go @@ -0,0 +1,52 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +// MessageMetadataWriter is used to set metadata when a binary Message is visited. +type MessageMetadataWriter interface { + // Set a standard attribute. 
+	//
+	// The value can either be the correct golang type for the attribute, or a canonical
+	// string encoding, or nil. If value is nil, then the attribute should be deleted.
+	// See package types to perform the needed conversions.
+	SetAttribute(attribute spec.Attribute, value interface{}) error
+
+	// Set an extension attribute.
+	//
+	// The value can either be the correct golang type for the attribute, or a canonical
+	// string encoding, or nil. If value is nil, then the extension should be deleted.
+	// See package types to perform the needed conversions.
+	SetExtension(name string, value interface{}) error
+}
+
+// BinaryWriter is used to visit a binary Message and generate a new representation.
+//
+// Protocols that support binary encoding should implement this interface to implement direct
+// binary to binary encoding and event to binary encoding.
+//
+// Start() and End() methods must be invoked by the caller of Message.ReadBinary() every time
+// the BinaryWriter implementation is used to visit a Message.
+type BinaryWriter interface {
+	MessageMetadataWriter
+
+	// Start is invoked at the beginning of the visit. Useful to perform initial memory allocations.
+	Start(ctx context.Context) error
+
+	// SetData receives an io.Reader for the data attribute.
+	// SetData is not invoked when the data attribute is empty.
+	SetData(data io.Reader) error
+
+	// End method is invoked only after the whole encoding process ends successfully.
+	// If it fails, it's never invoked. It can be used to finalize the message.
+	End(ctx context.Context) error
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go
new file mode 100644
index 00000000..ff92f683
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go
@@ -0,0 +1,66 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+/*
+Package binding defines interfaces for protocol bindings.
+
+NOTE: Most applications that emit or consume events should use the ../client
+package, which provides a simpler API to the underlying binding.
+
+The interfaces in this package provide extra encoding and protocol information
+to allow efficient forwarding and end-to-end reliable delivery between a
+Receiver and a Sender belonging to different bindings. This is useful for
+intermediary applications that route or forward events, but not necessary for
+most "endpoint" applications that emit or consume events.
+
+# Protocol Bindings
+
+A protocol binding usually implements a Message, a Sender and Receiver, a StructuredWriter and a BinaryWriter (depending on the supported encodings of the protocol) and a Write[ProtocolMessage] method.
+
+# Read and write events
+
+The core of this package is the binding.Message interface.
+Through binding.MessageReader it defines how to read a protocol specific message for an
+encoded event in structured mode or binary mode.
+The entity that receives a protocol specific data structure representing a message
+(e.g. an HttpRequest) encapsulates it in a binding.Message implementation using a NewMessage method (e.g. http.NewMessage).
+Then the entity that wants to send the binding.Message back on the wire
+translates it back to the protocol specific data structure (e.g. a Kafka ConsumerMessage), using
+the writers BinaryWriter and StructuredWriter specific to that protocol.
+Binding implementations expose their writers
+through a specific Write[ProtocolMessage] function (e.g. kafka.EncodeProducerMessage),
+in order to simplify the encoding process.
+
+The encoding process can be customized in order to mutate the final result with binding.Transformer.
+Several of these are provided directly by the binding/transformer module.
+
+Usually binding.Message implementations can be encoded only once, because the encoding process drains the message itself.
+In order to consume a message several times, the binding/buffering package provides several APIs to buffer the Message.
+
+A message can be converted to an event.Event using the binding.ToEvent() method.
+An event.Event can be used as a Message by casting it to binding.EventMessage.
+
+In order to simplify the encoding process for each protocol, this package provides several utility methods like binding.Write and binding.DirectWrite.
+The binding.Write method tries to preserve the structured/binary encoding, in order to be as efficient as possible.
+
+Messages can be wrapped to change their behaviour and bind their lifecycle, like the Message returned by binding.WithFinish.
+Every Message wrapper implements the MessageWrapper interface.
+
+# Sender and Receiver
+
+A Receiver receives protocol specific messages and wraps them into binding.Message implementations.
+
+A Sender converts arbitrary Message implementations to a protocol-specific form using the protocol specific Write method
+and sends them.
+
+Message and ExactlyOnceMessage provide methods to allow acknowledgments to
+propagate when a reliable message is forwarded from a Receiver to a Sender.
+QoS 0 (unreliable), 1 (at-least-once) and 2 (exactly-once) are supported.
+
+# Transport
+
+A binding implementation providing Sender and Receiver implementations can be used as a Transport through the BindingTransport adapter.
+*/
+package binding
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go
new file mode 100644
index 00000000..bb8f9142
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go
@@ -0,0 +1,50 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package binding
+
+import "errors"
+
+// Encoding enum specifies the type of encodings supported by binding interfaces
+type Encoding int
+
+const (
+	// Binary encoding as specified in https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md#message
+	EncodingBinary Encoding = iota
+	// Structured encoding as specified in https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md#message
+	EncodingStructured
+	// Message is an instance of EventMessage or it contains EventMessage nested (through MessageWrapper)
+	EncodingEvent
+	// When the encoding is unknown (which means that the message is a non-event)
+	EncodingUnknown
+
+	// EncodingBatch is an instance of JSON Batched Events
+	EncodingBatch
+)
+
+func (e Encoding) String() string {
+	switch e {
+	case EncodingBinary:
+		return "binary"
+	case EncodingStructured:
+		return "structured"
+	case EncodingEvent:
+		return "event"
+	case EncodingBatch:
+		return "batch"
+	case EncodingUnknown:
+		return "unknown"
+	}
+	return ""
+}
+
+// ErrUnknownEncoding specifies that the Message is not an event or it is encoded with an unknown encoding
+var ErrUnknownEncoding = errors.New("unknown Message encoding")
+
+// ErrNotStructured is returned by Message.ReadStructured for non-structured messages.
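+//
+// A typical caller-side check looks like this (it is what DirectWrite in write.go
+// does; message and w are assumed to be in scope):
+//
+//	if err := message.ReadStructured(ctx, w); err == nil {
+//		// the event was transferred in structured mode
+//	} else if err != ErrNotStructured {
+//		return err // a real failure, not just a mode mismatch
+//	}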
+var ErrNotStructured = errors.New("message is not in structured mode")
+
+// ErrNotBinary is returned by Message.ReadBinary for non-binary messages.
+var ErrNotBinary = errors.New("message is not in binary mode")
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go
new file mode 100644
index 00000000..83d613af
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go
@@ -0,0 +1,110 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package binding
+
+import (
+	"bytes"
+	"context"
+
+	"github.com/cloudevents/sdk-go/v2/binding/format"
+	"github.com/cloudevents/sdk-go/v2/binding/spec"
+	"github.com/cloudevents/sdk-go/v2/event"
+)
+
+type eventFormatKey int
+
+const (
+	formatEventStructured eventFormatKey = iota
+)
+
+// EventMessage type-converts an event.Event object to implement Message.
+// This allows local event.Event objects to be sent directly via Sender.Send()
+//
+//	s.Send(ctx, binding.EventMessage(e))
+//
+// When an event is wrapped into an EventMessage, the original event could
+// potentially be mutated. If you need to use the Event again after wrapping it
+// into an EventMessage, you should copy it beforehand.
+type EventMessage event.Event
+
+func ToMessage(e *event.Event) Message {
+	return (*EventMessage)(e)
+}
+
+func (m *EventMessage) ReadEncoding() Encoding {
+	return EncodingEvent
+}
+
+func (m *EventMessage) ReadStructured(ctx context.Context, builder StructuredWriter) error {
+	f := GetOrDefaultFromCtx(ctx, formatEventStructured, format.JSON).(format.Format)
+	b, err := f.Marshal((*event.Event)(m))
+	if err != nil {
+		return err
+	}
+	return builder.SetStructuredEvent(ctx, f, bytes.NewReader(b))
+}
+
+func (m *EventMessage) ReadBinary(ctx context.Context, b BinaryWriter) (err error) {
+	err = eventContextToBinaryWriter(m.Context, b)
+	if err != nil {
+		return err
+	}
+	// Pass the body
+	body := (*event.Event)(m).Data()
+	if len(body) > 0 {
+		err = b.SetData(bytes.NewBuffer(body))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (m *EventMessage) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) {
+	sv := spec.VS.Version(m.Context.GetSpecVersion())
+	a := sv.AttributeFromKind(k)
+	if a != nil {
+		return a, a.Get(m.Context)
+	}
+	return nil, nil
+}
+
+func (m *EventMessage) GetExtension(name string) interface{} {
+	ext, _ := m.Context.GetExtension(name)
+	return ext
+}
+
+func eventContextToBinaryWriter(c event.EventContext, b BinaryWriter) (err error) {
+	// Pass all attributes
+	sv := spec.VS.Version(c.GetSpecVersion())
+	for _, a := range sv.Attributes() {
+		value := a.Get(c)
+		if value != nil {
+			err = b.SetAttribute(a, value)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	// Pass all extensions
+	for k, v := range c.GetExtensions() {
+		err = b.SetExtension(k, v)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (*EventMessage) Finish(error) error { return nil }
+
+var _ Message = (*EventMessage)(nil)               // Test it conforms to the interface
+var _ MessageMetadataReader = (*EventMessage)(nil) // Test it conforms to the interface
+
+// UseFormatForEvent configures which format to use when marshalling the event to structured mode
+func UseFormatForEvent(ctx context.Context, f format.Format) context.Context {
+	return context.WithValue(ctx, formatEventStructured, f)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go
b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go new file mode 100644 index 00000000..8b51c4c6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import "github.com/cloudevents/sdk-go/v2/binding/spec" + +type finishMessage struct { + Message + finish func(error) +} + +func (m *finishMessage) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + return m.Message.(MessageMetadataReader).GetAttribute(k) +} + +func (m *finishMessage) GetExtension(s string) interface{} { + return m.Message.(MessageMetadataReader).GetExtension(s) +} + +func (m *finishMessage) GetWrappedMessage() Message { + return m.Message +} + +func (m *finishMessage) Finish(err error) error { + err2 := m.Message.Finish(err) // Finish original message first + if m.finish != nil { + m.finish(err) // Notify callback + } + return err2 +} + +var _ MessageWrapper = (*finishMessage)(nil) + +// WithFinish returns a wrapper for m that calls finish() and +// m.Finish() in its Finish(). +// Allows code to be notified when a message is Finished. +func WithFinish(m Message, finish func(error)) Message { + return &finishMessage{Message: m, finish: finish} +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go new file mode 100644 index 00000000..54c3f1a8 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go @@ -0,0 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package format formats structured events. + +The "application/cloudevents+json" format is built-in and always +available. Other formats may be added. +*/ +package format diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go new file mode 100644 index 00000000..6bdd1842 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go @@ -0,0 +1,105 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package format + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/cloudevents/sdk-go/v2/event" +) + +// Format marshals and unmarshals structured events to bytes. +type Format interface { + // MediaType identifies the format + MediaType() string + // Marshal event to bytes + Marshal(*event.Event) ([]byte, error) + // Unmarshal bytes to event + Unmarshal([]byte, *event.Event) error +} + +// Prefix for event-format media types. +const Prefix = "application/cloudevents" + +// IsFormat returns true if mediaType begins with "application/cloudevents" +func IsFormat(mediaType string) bool { return strings.HasPrefix(mediaType, Prefix) } + +// JSON is the built-in "application/cloudevents+json" format. +var JSON = jsonFmt{} + +type jsonFmt struct{} + +func (jsonFmt) MediaType() string { return event.ApplicationCloudEventsJSON } + +func (jsonFmt) Marshal(e *event.Event) ([]byte, error) { return json.Marshal(e) } +func (jsonFmt) Unmarshal(b []byte, e *event.Event) error { + return json.Unmarshal(b, e) +} + +// JSONBatch is the built-in "application/cloudevents-batch+json" format. 
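+//
+// Like JSON, it is registered in init() below and can be resolved from a content
+// type through Lookup, which strips any media type parameters, e.g.:
+//
+//	f := Lookup("application/cloudevents-batch+json; charset=utf-8") // returns JSONBatch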
+var JSONBatch = jsonBatchFmt{}
+
+type jsonBatchFmt struct{}
+
+func (jb jsonBatchFmt) MediaType() string {
+	return event.ApplicationCloudEventsBatchJSON
+}
+
+// Marshal returns an error for jsonBatchFmt, since the Format interface doesn't support batch marshalling. As we
+// know it's structured batch JSON, we go directly to json.Unmarshal() (see `ToEvents()`), since that is the best
+// way to support batch operations for now.
+func (jb jsonBatchFmt) Marshal(e *event.Event) ([]byte, error) {
+	return nil, errors.New("not supported for batch events")
+}
+
+func (jb jsonBatchFmt) Unmarshal(b []byte, e *event.Event) error {
+	return errors.New("not supported for batch events")
+}
+
+// built-in formats
+var formats map[string]Format
+
+func init() {
+	formats = map[string]Format{}
+	Add(JSON)
+	Add(JSONBatch)
+}
+
+// Lookup returns the format for contentType, or nil if not found.
+func Lookup(contentType string) Format {
+	i := strings.IndexRune(contentType, ';')
+	if i == -1 {
+		i = len(contentType)
+	}
+	contentType = strings.TrimSpace(strings.ToLower(contentType[0:i]))
+	return formats[contentType]
+}
+
+func unknown(mediaType string) error {
+	return fmt.Errorf("unknown event format media-type %#v", mediaType)
+}
+
+// Add a new Format. It can be retrieved by Lookup(f.MediaType())
+func Add(f Format) { formats[f.MediaType()] = f }
+
+// Marshal an event to bytes using the mediaType event format.
+func Marshal(mediaType string, e *event.Event) ([]byte, error) {
+	if f := formats[mediaType]; f != nil {
+		return f.Marshal(e)
+	}
+	return nil, unknown(mediaType)
+}
+
+// Unmarshal bytes to an event using the mediaType event format.
+func Unmarshal(mediaType string, b []byte, e *event.Event) error {
+	if f := formats[mediaType]; f != nil {
+		return f.Unmarshal(b, e)
+	}
+	return unknown(mediaType)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go
new file mode 100644
index 00000000..2fb136c6
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go
@@ -0,0 +1,153 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package binding
+
+import (
+	"context"
+
+	"github.com/cloudevents/sdk-go/v2/binding/spec"
+)
+
+// MessageReader defines the read-related portion of the Message interface.
+//
+// The ReadStructured and ReadBinary methods allow performing an optimized encoding of a Message to a specific data structure.
+//
+// If MessageReader.ReadEncoding() can be equal to EncodingBinary, then the implementation of MessageReader
+// MUST also implement MessageMetadataReader.
+//
+// A Sender should try each method of interest and fall back to binding.ToEvent() if none are supported.
+// An out of the box algorithm is provided for writing a message: binding.Write().
+type MessageReader interface {
+	// ReadEncoding returns the type of the message Encoding.
+	// The encoding should preferably be computed when the message is constructed.
+	ReadEncoding() Encoding
+
+	// ReadStructured transfers a structured-mode event to a StructuredWriter.
+	// It must return ErrNotStructured if the message is not in structured mode.
+	//
+	// Returns a different error if something went wrong while trying to read the structured event.
+	// In this case, the caller must Finish the message with the appropriate error.
+	//
+	// This allows Senders to avoid re-encoding messages that are
+	// already in suitable structured form.
+	ReadStructured(context.Context, StructuredWriter) error
+
+	// ReadBinary transfers a binary-mode event to a BinaryWriter.
+	// It must return ErrNotBinary if the message is not in binary mode.
+	//
+	// The implementation of ReadBinary must not control the lifecycle with BinaryWriter.Start() and BinaryWriter.End(),
+	// because the caller must control the lifecycle.
+	//
+	// Returns a different error if something went wrong while trying to read the binary event.
+	// In this case, the caller must Finish the message with the appropriate error.
+	//
+	// This allows Senders to avoid re-encoding messages that are
+	// already in suitable binary form.
+	ReadBinary(context.Context, BinaryWriter) error
+}
+
+// MessageMetadataReader defines how to read metadata from a binary/event message
+//
+// If a message implementing MessageReader is encoded as binary (MessageReader.ReadEncoding() == EncodingBinary)
+// or it's an EventMessage, then it's safe to assume that it also implements this interface
+type MessageMetadataReader interface {
+	// GetAttribute returns:
+	//
+	// * attribute, value: if the message contains an attribute of that attribute kind
+	// * attribute, nil: if the message spec version supports the attribute kind, but doesn't have any value
+	// * nil, nil: if the message spec version doesn't support the attribute kind
+	GetAttribute(attributeKind spec.Kind) (spec.Attribute, interface{})
+	// GetExtension returns the value of that extension, if any.
+	GetExtension(name string) interface{}
+}
+
+// Message is the interface to a binding-specific message containing an event.
+//
+// # Reliable Delivery
+//
+// There are 3 reliable qualities of service for messages:
+//
+// 0/at-most-once/unreliable: messages can be dropped silently.
+//
+// 1/at-least-once: messages are not dropped without signaling an error
+// to the sender, but they may be duplicated in the event of a re-send.
+//
+// 2/exactly-once: messages are never dropped (without error) or
+// duplicated, as long as both sending and receiving ends maintain
+// some binding-specific delivery state. Whether this is persisted
+// depends on the configuration of the binding implementations.
+//
+// The Message interface supports QoS 0 and 1; the ExactlyOnceMessage interface
+// supports QoS 2.
+//
+// Message includes the MessageReader interface to read messages. Every binding.Message implementation *must* specify if the message can be accessed one or more times.
+//
+// When a Message can be forgotten by the entity who produced the message, Message.Finish() *must* be invoked.
+type Message interface {
+	MessageReader
+
+	// Finish *must* be called when a message from a Receiver can be forgotten by
+	// the receiver. A QoS 1 sender should not call Finish() until it gets an acknowledgment of
+	// receipt on the underlying transport. For QoS 2 see ExactlyOnceMessage.
+	//
+	// Note that, depending on the Message implementation, forgetting to Finish the message
+	// could produce memory/resource leaks!
+	//
+	// Passing a non-nil err indicates sending or processing failed.
+	// A non-nil return indicates that the message was not accepted
+	// by the receiver's peer.
+	Finish(error) error
+}
+
+// ExactlyOnceMessage is implemented by received Messages
+// that support QoS 2. Only transports that support QoS 2 need to
+// implement or use this interface.
+type ExactlyOnceMessage interface {
+	Message
+
+	// Received is called by a forwarding QoS2 Sender when it gets
+	// acknowledgment of receipt (e.g. AMQP 'accept' or MQTT PUBREC)
+	//
+	// The receiver must call settle(nil) when it gets the ack-of-ack
+	// (e.g. AMQP 'settle' or MQTT PUBCOMP) or settle(err) if the
+	// transfer fails.
+	//
+	// Finally the Sender calls Finish() to indicate the message can be
+	// discarded.
+	//
+	// If sending fails, or if the sender does not support QoS 2, then
+	// Finish() may be called without any call to Received()
+	Received(settle func(error))
+}
+
+// MessageContext interface exposes the internal context that a message might contain.
+// Only some Message implementations implement this interface.
+type MessageContext interface {
+	// Get the context associated with this message
+	Context() context.Context
+}
+
+// MessageWrapper interface is used to walk through a decorated Message and unwrap it.
+type MessageWrapper interface {
+	Message
+	MessageMetadataReader
+
+	// Method to get the wrapped message
+	GetWrappedMessage() Message
+}
+
+// UnwrapMessage walks through the wrappers of message and returns the innermost Message.
+func UnwrapMessage(message Message) Message {
+	m := message
+	for m != nil {
+		switch mt := m.(type) {
+		case MessageWrapper:
+			m = mt.GetWrappedMessage()
+		default:
+			return m
+		}
+	}
+	return m
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go
new file mode 100644
index 00000000..3c3021d4
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go
@@ -0,0 +1,141 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package spec
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/cloudevents/sdk-go/v2/event"
+
+	"github.com/cloudevents/sdk-go/v2/types"
+)
+
+// Kind is a version-independent identifier for a CloudEvent context attribute.
+type Kind uint8
+
+const (
+	// Required cloudevents attributes
+	ID Kind = iota
+	Source
+	SpecVersion
+	Type
+	// Optional cloudevents attributes
+	DataContentType
+	DataSchema
+	Subject
+	Time
+)
+const nAttrs = int(Time) + 1
+
+var kindNames = [nAttrs]string{
+	"id",
+	"source",
+	"specversion",
+	"type",
+	"datacontenttype",
+	"dataschema",
+	"subject",
+	"time",
+}
+
+// String returns a human-readable string; for a valid attribute name use Attribute.Name.
+func (k Kind) String() string { return kindNames[k] }
+
+// IsRequired returns true for attributes defined as "required" by the CE spec.
+func (k Kind) IsRequired() bool { return k < DataContentType }
+
+// Attribute is a named attribute accessor.
+// The attribute name is specific to a Version.
+type Attribute interface {
+	Kind() Kind
+	// Name of the attribute with respect to the current spec Version() with prefix
+	PrefixedName() string
+	// Name of the attribute with respect to the current spec Version()
+	Name() string
+	// Version of the spec that this attribute belongs to
+	Version() Version
+	// Get the value of this attribute from an event context
+	Get(event.EventContextReader) interface{}
+	// Set the value of this attribute on an event context
+	Set(event.EventContextWriter, interface{}) error
+	// Delete this attribute from an event context, when possible
+	Delete(event.EventContextWriter) error
+}
+
+// accessor provides Kind, Get, Set.
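+// The public Attribute API built on top of these accessors is used elsewhere in
+// the SDK roughly like this (illustrative; see EventMessage.GetAttribute in binding):
+//
+//	sv := VS.Version(ec.GetSpecVersion()) // ec is an event.EventContextReader
+//	a := sv.AttributeFromKind(ID)
+//	v := a.Get(ec) // attribute value, or nil if unset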
+type accessor interface { + Kind() Kind + Get(event.EventContextReader) interface{} + Set(event.EventContextWriter, interface{}) error + Delete(event.EventContextWriter) error +} + +var acc = [nAttrs]accessor{ + &aStr{aKind(ID), event.EventContextReader.GetID, event.EventContextWriter.SetID}, + &aStr{aKind(Source), event.EventContextReader.GetSource, event.EventContextWriter.SetSource}, + &aStr{aKind(SpecVersion), event.EventContextReader.GetSpecVersion, func(writer event.EventContextWriter, s string) error { return nil }}, + &aStr{aKind(Type), event.EventContextReader.GetType, event.EventContextWriter.SetType}, + &aStr{aKind(DataContentType), event.EventContextReader.GetDataContentType, event.EventContextWriter.SetDataContentType}, + &aStr{aKind(DataSchema), event.EventContextReader.GetDataSchema, event.EventContextWriter.SetDataSchema}, + &aStr{aKind(Subject), event.EventContextReader.GetSubject, event.EventContextWriter.SetSubject}, + &aTime{aKind(Time), event.EventContextReader.GetTime, event.EventContextWriter.SetTime}, +} + +// aKind implements Kind() +type aKind Kind + +func (kind aKind) Kind() Kind { return Kind(kind) } + +type aStr struct { + aKind + get func(event.EventContextReader) string + set func(event.EventContextWriter, string) error +} + +func (a *aStr) Get(c event.EventContextReader) interface{} { + if s := a.get(c); s != "" { + return s + } + return nil // Treat blank as missing +} + +func (a *aStr) Set(c event.EventContextWriter, v interface{}) error { + s, err := types.ToString(v) + if err != nil { + return fmt.Errorf("invalid value for %s: %#v", a.Kind(), v) + } + return a.set(c, s) +} + +func (a *aStr) Delete(c event.EventContextWriter) error { + return a.set(c, "") +} + +type aTime struct { + aKind + get func(event.EventContextReader) time.Time + set func(event.EventContextWriter, time.Time) error +} + +func (a *aTime) Get(c event.EventContextReader) interface{} { + if v := a.get(c); !v.IsZero() { + return v + } + return nil // Treat zero time as missing. +} + +func (a *aTime) Set(c event.EventContextWriter, v interface{}) error { + t, err := types.ToTime(v) + if err != nil { + return fmt.Errorf("invalid value for %s: %#v", a.Kind(), v) + } + return a.set(c, t) +} + +func (a *aTime) Delete(c event.EventContextWriter) error { + return a.set(c, time.Time{}) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go new file mode 100644 index 00000000..da5bc9f8 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go @@ -0,0 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package spec provides spec-version metadata. + +For use by code that maps events using (prefixed) attribute name strings. +Supports handling multiple spec versions uniformly. 
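+
+For example (an illustrative sketch, not part of the package), a binding that
+maps "ce-"-prefixed transport headers onto an event context might do:
+
+	vs := spec.WithPrefix("ce-")
+	v := vs.Version("1.0")
+	err := v.SetAttribute(ec, "ce-id", "my-id") // ec is an event.EventContextWriter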
+*/ +package spec diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go new file mode 100644 index 00000000..110787dd --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go @@ -0,0 +1,81 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "github.com/cloudevents/sdk-go/v2/event" +) + +type matchExactVersion struct { + version +} + +func (v *matchExactVersion) Attribute(name string) Attribute { return v.attrMap[name] } + +var _ Version = (*matchExactVersion)(nil) + +func newMatchExactVersionVersion( + prefix string, + attributeNameMatchMapper func(string) string, + context event.EventContext, + convert func(event.EventContextConverter) event.EventContext, + attrs ...*attribute, +) *matchExactVersion { + v := &matchExactVersion{ + version: version{ + prefix: prefix, + context: context, + convert: convert, + attrMap: map[string]Attribute{}, + attrs: make([]Attribute, len(attrs)), + }, + } + for i, a := range attrs { + a.version = v + v.attrs[i] = a + v.attrMap[attributeNameMatchMapper(a.name)] = a + } + return v +} + +// WithPrefixMatchExact returns a set of versions with prefix added to all attribute names. +func WithPrefixMatchExact(attributeNameMatchMapper func(string) string, prefix string) *Versions { + attr := func(name string, kind Kind) *attribute { + return &attribute{accessor: acc[kind], name: name} + } + vs := &Versions{ + m: map[string]Version{}, + prefix: prefix, + all: []Version{ + newMatchExactVersionVersion(prefix, attributeNameMatchMapper, event.EventContextV1{}.AsV1(), + func(c event.EventContextConverter) event.EventContext { return c.AsV1() }, + attr("id", ID), + attr("source", Source), + attr("specversion", SpecVersion), + attr("type", Type), + attr("datacontenttype", DataContentType), + attr("dataschema", DataSchema), + attr("subject", Subject), + attr("time", Time), + ), + newMatchExactVersionVersion(prefix, attributeNameMatchMapper, event.EventContextV03{}.AsV03(), + func(c event.EventContextConverter) event.EventContext { return c.AsV03() }, + attr("specversion", SpecVersion), + attr("type", Type), + attr("source", Source), + attr("schemaurl", DataSchema), + attr("subject", Subject), + attr("id", ID), + attr("time", Time), + attr("datacontenttype", DataContentType), + ), + }, + } + for _, v := range vs.all { + vs.m[v.String()] = v + } + return vs +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go new file mode 100644 index 00000000..7fa0f584 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go @@ -0,0 +1,189 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "strings" + + "github.com/cloudevents/sdk-go/v2/event" +) + +// Version provides meta-data for a single spec-version. +type Version interface { + // String name of the version, e.g. "1.0" + String() string + // Prefix for attribute names. + Prefix() string + // Attribute looks up a prefixed attribute name (case insensitive). + // Returns nil if not found. + Attribute(prefixedName string) Attribute + // Attribute looks up the attribute from kind. + // Returns nil if not found. + AttributeFromKind(kind Kind) Attribute + // Attributes returns all the context attributes for this version. 
+ Attributes() []Attribute + // Convert translates a context to this version. + Convert(event.EventContextConverter) event.EventContext + // NewContext returns a new context for this version. + NewContext() event.EventContext + // SetAttribute sets named attribute to value. + // + // Name is case insensitive. + // Does nothing if name does not start with prefix. + SetAttribute(context event.EventContextWriter, name string, value interface{}) error +} + +// Versions contains all known versions with the same attribute prefix. +type Versions struct { + prefix string + all []Version + m map[string]Version +} + +// Versions returns the list of all known versions, most recent first. +func (vs *Versions) Versions() []Version { return vs.all } + +// Version returns the named version. +func (vs *Versions) Version(name string) Version { + return vs.m[name] +} + +// Latest returns the latest Version +func (vs *Versions) Latest() Version { return vs.all[0] } + +// PrefixedSpecVersionName returns the specversion attribute PrefixedName +func (vs *Versions) PrefixedSpecVersionName() string { return vs.prefix + "specversion" } + +// Prefix is the lowercase attribute name prefix. +func (vs *Versions) Prefix() string { return vs.prefix } + +type attribute struct { + accessor + name string + version Version +} + +func (a *attribute) PrefixedName() string { return a.version.Prefix() + a.name } +func (a *attribute) Name() string { return a.name } +func (a *attribute) Version() Version { return a.version } + +type version struct { + prefix string + context event.EventContext + convert func(event.EventContextConverter) event.EventContext + attrMap map[string]Attribute + attrs []Attribute +} + +func (v *version) Attribute(name string) Attribute { return v.attrMap[strings.ToLower(name)] } +func (v *version) Attributes() []Attribute { return v.attrs } +func (v *version) String() string { return v.context.GetSpecVersion() } +func (v *version) Prefix() string { return v.prefix } +func (v *version) NewContext() event.EventContext { return v.context.Clone() } + +// HasPrefix is a case-insensitive prefix check. +func (v *version) HasPrefix(name string) bool { + return strings.HasPrefix(strings.ToLower(name), v.prefix) +} + +func (v *version) Convert(c event.EventContextConverter) event.EventContext { return v.convert(c) } + +func (v *version) SetAttribute(c event.EventContextWriter, name string, value interface{}) error { + if a := v.Attribute(name); a != nil { // Standard attribute + return a.Set(c, value) + } + name = strings.ToLower(name) + var err error + if v.HasPrefix(name) { // Extension attribute + return c.SetExtension(strings.TrimPrefix(name, v.prefix), value) + } + return err +} + +func (v *version) AttributeFromKind(kind Kind) Attribute { + for _, a := range v.Attributes() { + if a.Kind() == kind { + return a + } + } + return nil +} + +func newVersion( + prefix string, + context event.EventContext, + convert func(event.EventContextConverter) event.EventContext, + attrs ...*attribute, +) *version { + v := &version{ + prefix: strings.ToLower(prefix), + context: context, + convert: convert, + attrMap: map[string]Attribute{}, + attrs: make([]Attribute, len(attrs)), + } + for i, a := range attrs { + a.version = v + v.attrs[i] = a + v.attrMap[strings.ToLower(a.PrefixedName())] = a + } + return v +} + +// WithPrefix returns a set of versions with prefix added to all attribute names. 
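+//
+// For instance (illustrative), an HTTP binding that carries attributes in
+// "ce-"-prefixed headers can resolve them case-insensitively:
+//
+//	vs := WithPrefix("ce-")
+//	a := vs.Latest().Attribute("CE-Type") // resolves the "type" attribute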
+func WithPrefix(prefix string) *Versions {
+	attr := func(name string, kind Kind) *attribute {
+		return &attribute{accessor: acc[kind], name: name}
+	}
+	vs := &Versions{
+		m:      map[string]Version{},
+		prefix: prefix,
+		all: []Version{
+			newVersion(prefix, event.EventContextV1{}.AsV1(),
+				func(c event.EventContextConverter) event.EventContext { return c.AsV1() },
+				attr("id", ID),
+				attr("source", Source),
+				attr("specversion", SpecVersion),
+				attr("type", Type),
+				attr("datacontenttype", DataContentType),
+				attr("dataschema", DataSchema),
+				attr("subject", Subject),
+				attr("time", Time),
+			),
+			newVersion(prefix, event.EventContextV03{}.AsV03(),
+				func(c event.EventContextConverter) event.EventContext { return c.AsV03() },
+				attr("specversion", SpecVersion),
+				attr("type", Type),
+				attr("source", Source),
+				attr("schemaurl", DataSchema),
+				attr("subject", Subject),
+				attr("id", ID),
+				attr("time", Time),
+				attr("datacontenttype", DataContentType),
+			),
+		},
+	}
+	for _, v := range vs.all {
+		vs.m[v.String()] = v
+	}
+	return vs
+}
+
+// New returns a set of versions
+func New() *Versions { return WithPrefix("") }
+
+// Built-in un-prefixed versions.
+var (
+	VS  *Versions
+	V03 Version
+	V1  Version
+)
+
+func init() {
+	VS = New()
+	V03 = VS.Version("0.3")
+	V1 = VS.Version("1.0")
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go
new file mode 100644
index 00000000..60256f2b
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go
@@ -0,0 +1,22 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package binding
+
+import (
+	"context"
+	"io"
+
+	"github.com/cloudevents/sdk-go/v2/binding/format"
+)
+
+// StructuredWriter is used to visit a structured Message and generate a new representation.
+//
+// Protocols that support structured encoding should implement this interface to implement direct
+// structured to structured encoding and event to structured encoding.
+type StructuredWriter interface {
+	// SetStructuredEvent receives an io.Reader for the whole event.
+	SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go
new file mode 100644
index 00000000..d3332c15
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go
@@ -0,0 +1,153 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
*/
+
+package binding
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/cloudevents/sdk-go/v2/binding/format"
+	"github.com/cloudevents/sdk-go/v2/binding/spec"
+	"github.com/cloudevents/sdk-go/v2/event"
+	"github.com/cloudevents/sdk-go/v2/types"
+)
+
+// ErrCannotConvertToEvent is a generic error when a conversion of a Message to an Event fails
+var ErrCannotConvertToEvent = errors.New("cannot convert message to event")
+
+// ErrCannotConvertToEvents is a generic error when a conversion of a Message to a Batched Event fails
+var ErrCannotConvertToEvents = errors.New("cannot convert message to batched events")
+
+// ToEvent translates a Message with a valid Structured or Binary representation to an Event.
+// This function returns the Event generated from the Message or
+// an error that describes the conversion failure.
+// transformers can be nil and this function guarantees that they are invoked only once during the encoding process.
+func ToEvent(ctx context.Context, message MessageReader, transformers ...Transformer) (*event.Event, error) {
+	if message == nil {
+		return nil, nil
+	}
+
+	messageEncoding := message.ReadEncoding()
+	if messageEncoding == EncodingEvent {
+		m := message
+		for m != nil {
+			switch mt := m.(type) {
+			case *EventMessage:
+				e := (*event.Event)(mt)
+				return e, Transformers(transformers).Transform(mt, (*messageToEventBuilder)(e))
+			case MessageWrapper:
+				m = mt.GetWrappedMessage()
+			default:
+				// Neither an EventMessage nor a wrapper: stop unwrapping.
+				m = nil
+			}
+		}
+		return nil, ErrCannotConvertToEvent
+	}
+
+	e := event.New()
+	encoder := (*messageToEventBuilder)(&e)
+	_, err := DirectWrite(
+		context.Background(),
+		message,
+		encoder,
+		encoder,
+	)
+	if err != nil {
+		return nil, err
+	}
+	return &e, Transformers(transformers).Transform((*EventMessage)(&e), encoder)
+}
+
+// ToEvents translates a Batch Message and corresponding Reader data to a slice of Events.
+// This function returns the Events generated from the body data, or an error that points
+// to the conversion issue.
+func ToEvents(ctx context.Context, message MessageReader, body io.Reader) ([]event.Event, error) {
+	messageEncoding := message.ReadEncoding()
+	if messageEncoding != EncodingBatch {
+		return nil, ErrCannotConvertToEvents
+	}
+
+	// Since Format doesn't support batch marshalling, and we know it's structured batch JSON, we go directly to
+	// json.Unmarshal(), since that is the best way to support batch operations for now.
+	var events []event.Event
+	return events, json.NewDecoder(body).Decode(&events)
+}
+
+type messageToEventBuilder event.Event
+
+var _ StructuredWriter = (*messageToEventBuilder)(nil)
+var _ BinaryWriter = (*messageToEventBuilder)(nil)
+
+func (b *messageToEventBuilder) SetStructuredEvent(ctx context.Context, format format.Format, ev io.Reader) error {
+	var buf bytes.Buffer
+	_, err := io.Copy(&buf, ev)
+	if err != nil {
+		return err
+	}
+	return format.Unmarshal(buf.Bytes(), (*event.Event)(b))
+}
+
+func (b *messageToEventBuilder) Start(ctx context.Context) error {
+	return nil
+}
+
+func (b *messageToEventBuilder) End(ctx context.Context) error {
+	return nil
+}
+
+func (b *messageToEventBuilder) SetData(data io.Reader) error {
+	buf, ok := data.(*bytes.Buffer)
+	if !ok {
+		buf = new(bytes.Buffer)
+		_, err := io.Copy(buf, data)
+		if err != nil {
+			return err
+		}
+	}
+	if buf.Len() > 0 {
+		b.DataEncoded = buf.Bytes()
+	}
+	return nil
+}
+
+func (b *messageToEventBuilder) SetAttribute(attribute spec.Attribute, value interface{}) error {
+	if value == nil {
+		_ = attribute.Delete(b.Context)
+		return nil
+	}
+	// If setting the spec version, we need to switch to the right context struct
+	if attribute.Kind() == spec.SpecVersion {
+		str, err := types.ToString(value)
+		if err != nil {
+			return err
+		}
+		switch str {
+		case event.CloudEventsVersionV03:
+			b.Context = b.Context.AsV03()
+		case event.CloudEventsVersionV1:
+			b.Context = b.Context.AsV1()
+		default:
+			return fmt.Errorf("unrecognized event version %s", str)
+		}
+		return nil
+	}
+	return attribute.Set(b.Context, value)
+}
+
+func (b *messageToEventBuilder) SetExtension(name string, value interface{}) error {
+	if value == nil {
+		return b.Context.SetExtension(name, nil)
+	}
+	value, err := types.Validate(value)
+	if err != nil {
+		return err
+	}
+	return b.Context.SetExtension(name, value)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go
b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go new file mode 100644 index 00000000..de3bec44 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +// Transformer is an interface that implements a transformation +// process while transferring the event from the Message +// implementation to the provided encoder +// +// When a write function (binding.Write, binding.ToEvent, buffering.CopyMessage, etc.) +// takes Transformer(s) as parameter, it eventually converts the message to a form +// which correctly implements MessageMetadataReader, in order to guarantee that transformation +// is applied +type Transformer interface { + Transform(MessageMetadataReader, MessageMetadataWriter) error +} + +// TransformerFunc is a type alias to implement a Transformer through a function pointer +type TransformerFunc func(MessageMetadataReader, MessageMetadataWriter) error + +func (t TransformerFunc) Transform(r MessageMetadataReader, w MessageMetadataWriter) error { + return t(r, w) +} + +var _ Transformer = (TransformerFunc)(nil) + +// Transformers is a utility alias to run several Transformer +type Transformers []Transformer + +func (t Transformers) Transform(r MessageMetadataReader, w MessageMetadataWriter) error { + for _, transformer := range t { + err := transformer.Transform(r, w) + if err != nil { + return err + } + } + return nil +} + +var _ Transformer = (Transformers)(nil) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go new file mode 100644 index 00000000..cb498e62 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go @@ -0,0 +1,179 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/event" +) + +type eventEncodingKey int + +const ( + skipDirectStructuredEncoding eventEncodingKey = iota + skipDirectBinaryEncoding + preferredEventEncoding +) + +// DirectWrite invokes the encoders. structuredWriter and binaryWriter could be nil if the protocol doesn't support it. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. 
+// This function MUST be invoked only if message.ReadEncoding() == EncodingBinary or message.ReadEncoding() == EncodingStructured
+//
+// Returns:
+// * EncodingStructured, nil if message is correctly encoded in structured encoding
+// * EncodingBinary, nil if message is correctly encoded in binary encoding
+// * EncodingStructured, err if message was structured but an error happened during the encoding
+// * EncodingBinary, err if message was binary but an error happened during the encoding
+// * EncodingUnknown, ErrUnknownEncoding if message is neither a structured nor a binary Message
+func DirectWrite(
+	ctx context.Context,
+	message MessageReader,
+	structuredWriter StructuredWriter,
+	binaryWriter BinaryWriter,
+	transformers ...Transformer,
+) (Encoding, error) {
+	if structuredWriter != nil && len(transformers) == 0 && !GetOrDefaultFromCtx(ctx, skipDirectStructuredEncoding, false).(bool) {
+		if err := message.ReadStructured(ctx, structuredWriter); err == nil {
+			return EncodingStructured, nil
+		} else if err != ErrNotStructured {
+			return EncodingStructured, err
+		}
+	}
+
+	if binaryWriter != nil && !GetOrDefaultFromCtx(ctx, skipDirectBinaryEncoding, false).(bool) && message.ReadEncoding() == EncodingBinary {
+		return EncodingBinary, writeBinaryWithTransformer(ctx, message, binaryWriter, transformers)
+	}
+
+	return EncodingUnknown, ErrUnknownEncoding
+}
+
+// Write executes the full algorithm to encode a Message using transformers:
+// 1. It first tries direct encoding using DirectWrite
+// 2. If no direct encoding is possible, it uses ToEvent to generate an Event representation
+// 3. From the Event, the message is encoded back to the provided structured or binary encoders
+// You can tweak the encoding process using the context decorators WithForceStructured, WithForceBinary, etc.
+// transformers can be nil and this function guarantees that they are invoked only once during the encoding process.
+// Returns:
+// * EncodingStructured, nil if message is correctly encoded in structured encoding
+// * EncodingBinary, nil if message is correctly encoded in binary encoding
+// * EncodingUnknown, ErrUnknownEncoding if message.ReadEncoding() == EncodingUnknown
+// * _, err if an error happened during the encoding
+func Write(
+	ctx context.Context,
+	message MessageReader,
+	structuredWriter StructuredWriter,
+	binaryWriter BinaryWriter,
+	transformers ...Transformer,
+) (Encoding, error) {
+	enc := message.ReadEncoding()
+	var err error
+	// Skip direct encoding if the event is an event message
+	if enc != EncodingEvent {
+		enc, err = DirectWrite(ctx, message, structuredWriter, binaryWriter, transformers...)
+		if enc != EncodingUnknown {
+			// Message directly encoded, nothing else to do here
+			return enc, err
+		}
+	}
+
+	var e *event.Event
+	e, err = ToEvent(ctx, message, transformers...)
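The Write path above can be exercised end to end with a toy StructuredWriter. Everything below is an illustrative sketch built only on APIs from this patch; bufWriter is a made-up type, not part of the SDK:

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"

	"github.com/cloudevents/sdk-go/v2/binding"
	"github.com/cloudevents/sdk-go/v2/binding/format"
	"github.com/cloudevents/sdk-go/v2/event"
)

// bufWriter is a toy StructuredWriter that captures the serialized event.
type bufWriter struct {
	mediaType string
	buf       bytes.Buffer
}

func (w *bufWriter) SetStructuredEvent(ctx context.Context, f format.Format, ev io.Reader) error {
	w.mediaType = f.MediaType()
	_, err := io.Copy(&w.buf, ev)
	return err
}

func main() {
	e := event.New()
	e.SetID("1")
	e.SetType("com.example.ping")
	e.SetSource("example/demo")

	w := &bufWriter{}
	// Force the structured branch of Write.
	ctx := binding.WithForceStructured(context.Background())
	enc, err := binding.Write(ctx, (*binding.EventMessage)(&e), w, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(enc, w.mediaType) // structured application/cloudevents+json
}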
+ if err != nil { + return enc, err + } + + message = (*EventMessage)(e) + + if GetOrDefaultFromCtx(ctx, preferredEventEncoding, EncodingBinary).(Encoding) == EncodingStructured { + if structuredWriter != nil { + return EncodingStructured, message.ReadStructured(ctx, structuredWriter) + } + if binaryWriter != nil { + return EncodingBinary, writeBinary(ctx, message, binaryWriter) + } + } else { + if binaryWriter != nil { + return EncodingBinary, writeBinary(ctx, message, binaryWriter) + } + if structuredWriter != nil { + return EncodingStructured, message.ReadStructured(ctx, structuredWriter) + } + } + + return EncodingUnknown, ErrUnknownEncoding +} + +// WithSkipDirectStructuredEncoding skips direct structured to structured encoding during the encoding process +func WithSkipDirectStructuredEncoding(ctx context.Context, skip bool) context.Context { + return context.WithValue(ctx, skipDirectStructuredEncoding, skip) +} + +// WithSkipDirectBinaryEncoding skips direct binary to binary encoding during the encoding process +func WithSkipDirectBinaryEncoding(ctx context.Context, skip bool) context.Context { + return context.WithValue(ctx, skipDirectBinaryEncoding, skip) +} + +// WithPreferredEventEncoding defines the preferred encoding from event to message during the encoding process +func WithPreferredEventEncoding(ctx context.Context, enc Encoding) context.Context { + return context.WithValue(ctx, preferredEventEncoding, enc) +} + +// WithForceStructured forces structured encoding during the encoding process +func WithForceStructured(ctx context.Context) context.Context { + return context.WithValue(context.WithValue(ctx, preferredEventEncoding, EncodingStructured), skipDirectBinaryEncoding, true) +} + +// WithForceBinary forces binary encoding during the encoding process +func WithForceBinary(ctx context.Context) context.Context { + return context.WithValue(context.WithValue(ctx, preferredEventEncoding, EncodingBinary), skipDirectStructuredEncoding, true) +} + +// GetOrDefaultFromCtx gets a configuration value from the provided context +func GetOrDefaultFromCtx(ctx context.Context, key interface{}, def interface{}) interface{} { + if val := ctx.Value(key); val != nil { + return val + } else { + return def + } +} + +func writeBinaryWithTransformer( + ctx context.Context, + message MessageReader, + binaryWriter BinaryWriter, + transformers Transformers, +) error { + err := binaryWriter.Start(ctx) + if err != nil { + return err + } + err = message.ReadBinary(ctx, binaryWriter) + if err != nil { + return err + } + err = transformers.Transform(message.(MessageMetadataReader), binaryWriter) + if err != nil { + return err + } + return binaryWriter.End(ctx) +} + +func writeBinary( + ctx context.Context, + message MessageReader, + binaryWriter BinaryWriter, +) error { + err := binaryWriter.Start(ctx) + if err != nil { + return err + } + err = message.ReadBinary(ctx, binaryWriter) + if err != nil { + return err + } + return binaryWriter.End(ctx) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go new file mode 100644 index 00000000..452304ff --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go @@ -0,0 +1,295 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "errors" + "fmt" + "io" + "runtime" + "sync" + + "go.uber.org/zap" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext 
"github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// Client interface defines the runtime contract the CloudEvents client supports. +type Client interface { + // Send will transmit the given event over the client's configured transport. + Send(ctx context.Context, event event.Event) protocol.Result + + // Request will transmit the given event over the client's configured + // transport and return any response event. + Request(ctx context.Context, event event.Event) (*event.Event, protocol.Result) + + // StartReceiver will register the provided function for callback on receipt + // of a cloudevent. It will also start the underlying protocol as it has + // been configured. + // This call is blocking. + // Valid fn signatures are: + // * func() + // * func() error + // * func(context.Context) + // * func(context.Context) protocol.Result + // * func(event.Event) + // * func(event.Event) protocol.Result + // * func(context.Context, event.Event) + // * func(context.Context, event.Event) protocol.Result + // * func(event.Event) *event.Event + // * func(event.Event) (*event.Event, protocol.Result) + // * func(context.Context, event.Event) *event.Event + // * func(context.Context, event.Event) (*event.Event, protocol.Result) + StartReceiver(ctx context.Context, fn interface{}) error +} + +// New produces a new client with the provided transport object and applied +// client options. +func New(obj interface{}, opts ...Option) (Client, error) { + c := &ceClient{ + // Running runtime.GOMAXPROCS(0) doesn't update the value, just returns the current one + pollGoroutines: runtime.GOMAXPROCS(0), + observabilityService: noopObservabilityService{}, + } + + if p, ok := obj.(protocol.Sender); ok { + c.sender = p + } + if p, ok := obj.(protocol.Requester); ok { + c.requester = p + } + if p, ok := obj.(protocol.Responder); ok { + c.responder = p + } + if p, ok := obj.(protocol.Receiver); ok { + c.receiver = p + } + if p, ok := obj.(protocol.Opener); ok { + c.opener = p + } + + if err := c.applyOptions(opts...); err != nil { + return nil, err + } + return c, nil +} + +type ceClient struct { + sender protocol.Sender + requester protocol.Requester + receiver protocol.Receiver + responder protocol.Responder + // Optional. + opener protocol.Opener + + observabilityService ObservabilityService + + inboundContextDecorators []func(context.Context, binding.Message) context.Context + outboundContextDecorators []func(context.Context) context.Context + invoker Invoker + receiverMu sync.Mutex + eventDefaulterFns []EventDefaulter + pollGoroutines int + blockingCallback bool + ackMalformedEvent bool +} + +func (c *ceClient) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(c); err != nil { + return err + } + } + return nil +} + +func (c *ceClient) Send(ctx context.Context, e event.Event) protocol.Result { + var err error + if c.sender == nil { + err = errors.New("sender not set") + return err + } + + for _, f := range c.outboundContextDecorators { + ctx = f(ctx) + } + + if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + e = fn(ctx, e) + } + } + if err = e.Validate(); err != nil { + return err + } + + // Event has been defaulted and validated, record we are going to perform send. 
+ ctx, cb := c.observabilityService.RecordSendingEvent(ctx, e) + err = c.sender.Send(ctx, (*binding.EventMessage)(&e)) + defer cb(err) + return err +} + +func (c *ceClient) Request(ctx context.Context, e event.Event) (*event.Event, protocol.Result) { + var resp *event.Event + var err error + + if c.requester == nil { + err = errors.New("requester not set") + return nil, err + } + for _, f := range c.outboundContextDecorators { + ctx = f(ctx) + } + + if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + e = fn(ctx, e) + } + } + + if err = e.Validate(); err != nil { + return nil, err + } + + // Event has been defaulted and validated, record we are going to perform request. + ctx, cb := c.observabilityService.RecordRequestEvent(ctx, e) + + // If provided a requester, use it to do request/response. + var msg binding.Message + msg, err = c.requester.Request(ctx, (*binding.EventMessage)(&e)) + if msg != nil { + defer func() { + if err := msg.Finish(err); err != nil { + cecontext.LoggerFrom(ctx).Warnw("failed calling message.Finish", zap.Error(err)) + } + }() + } + if protocol.IsUndelivered(err) { + return nil, err + } + + // try to turn msg into an event, it might not work and that is ok. + if rs, rserr := binding.ToEvent(ctx, msg); rserr != nil { + cecontext.LoggerFrom(ctx).Debugw("response: failed calling ToEvent", zap.Error(rserr), zap.Any("resp", msg)) + // If the protocol returns no error, it is an ACK on the request, but we had + // issues turning the response into an event, so make an ACK Result and pass + // down the ToEvent error as well. + err = protocol.NewReceipt(true, "failed to convert response into event: %v\n%w", rserr, err) + } else { + resp = rs + } + defer cb(err, resp) + return resp, err +} + +// StartReceiver sets up the given fn to handle Receive. +// See Client.StartReceiver for details. This is a blocking call. +func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c.receiverMu.Lock() + defer c.receiverMu.Unlock() + + if c.invoker != nil { + return fmt.Errorf("client already has a receiver") + } + + invoker, err := newReceiveInvoker( + fn, + c.observabilityService, + c.inboundContextDecorators, + c.eventDefaulterFns, + c.ackMalformedEvent, + ) + if err != nil { + return err + } + if invoker.IsReceiver() && c.receiver == nil { + return fmt.Errorf("mismatched receiver callback without protocol.Receiver supported by protocol") + } + if invoker.IsResponder() && c.responder == nil { + return fmt.Errorf("mismatched receiver callback without protocol.Responder supported by protocol") + } + c.invoker = invoker + + if c.responder == nil && c.receiver == nil { + return errors.New("responder nor receiver set") + } + + defer func() { + c.invoker = nil + }() + + // Start Polling. 
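A typical Send call, for reference (illustrative sketch; the target URL is a placeholder, and NewHTTP's WithTimeNow/WithUUIDs defaulters fill in the missing time and id before validation):

package main

import (
	"context"
	"log"

	"github.com/cloudevents/sdk-go/v2/client"
	cecontext "github.com/cloudevents/sdk-go/v2/context"
	"github.com/cloudevents/sdk-go/v2/event"
	"github.com/cloudevents/sdk-go/v2/protocol"
)

func main() {
	c, err := client.NewHTTP()
	if err != nil {
		log.Fatal(err)
	}

	e := event.New()
	e.SetType("com.example.ping")
	e.SetSource("example/demo")
	_ = e.SetData(event.ApplicationJSON, map[string]string{"hello": "world"})

	// The target travels in the context and is read by the HTTP sender.
	ctx := cecontext.WithTarget(context.Background(), "http://localhost:8080/")
	if result := c.Send(ctx, e); protocol.IsUndelivered(result) {
		log.Printf("send failed: %v", result)
	}
}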
+ wg := sync.WaitGroup{} + for i := 0; i < c.pollGoroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + var msg binding.Message + var respFn protocol.ResponseFn + var err error + + if c.responder != nil { + msg, respFn, err = c.responder.Respond(ctx) + } else if c.receiver != nil { + msg, err = c.receiver.Receive(ctx) + respFn = noRespFn + } + + if err == io.EOF { // Normal close + return + } + + if err != nil { + cecontext.LoggerFrom(ctx).Warn("Error while receiving a message: ", err) + continue + } + + callback := func() { + if err := c.invoker.Invoke(ctx, msg, respFn); err != nil { + cecontext.LoggerFrom(ctx).Warn("Error while handling a message: ", err) + } + } + + if c.blockingCallback { + callback() + } else { + // Do not block on the invoker. + wg.Add(1) + go func() { + defer wg.Done() + callback() + }() + } + } + }() + } + + // Start the opener, if set. + if c.opener != nil { + if err = c.opener.OpenInbound(ctx); err != nil { + err = fmt.Errorf("error while opening the inbound connection: %w", err) + cancel() + } + } + + wg.Wait() + + return err +} + +// noRespFn is used to simply forward the protocol.Result for receivers that aren't responders +func noRespFn(_ context.Context, _ binding.Message, r protocol.Result, _ ...binding.Transformer) error { + return r +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go new file mode 100644 index 00000000..d48cc204 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go @@ -0,0 +1,35 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "github.com/cloudevents/sdk-go/v2/protocol/http" +) + +// NewHTTP provides the good defaults for the common case using an HTTP +// Protocol client. +// The WithTimeNow, and WithUUIDs client options are also applied to the +// client, all outbound events will have a time and id set if not already +// present. +func NewHTTP(opts ...http.Option) (Client, error) { + p, err := http.New(opts...) + if err != nil { + return nil, err + } + + c, err := New(p, WithTimeNow(), WithUUIDs()) + if err != nil { + return nil, err + } + + return c, nil +} + +// NewDefault has been replaced by NewHTTP +// Deprecated. To get the same as NewDefault provided, please use NewHTTP with +// the observability service passed as an option, or client.NewClientHTTP from +// package github.com/cloudevents/sdk-go/observability/opencensus/v2/client +var NewDefault = NewHTTP diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go new file mode 100644 index 00000000..82985b8a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go @@ -0,0 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +// NewObserved produces a new client with the provided transport object and applied +// client options. +// Deprecated: This now has the same behaviour of New, and will be removed in future releases. +// As New, you must provide the observability service to use. 
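And the receiving side, using one of the callback shapes listed in the Client documentation above (a sketch; the port is arbitrary):

package main

import (
	"context"
	"log"

	"github.com/cloudevents/sdk-go/v2/client"
	"github.com/cloudevents/sdk-go/v2/event"
	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

func main() {
	c, err := client.NewHTTP(cehttp.WithPort(8080))
	if err != nil {
		log.Fatal(err)
	}

	// func(context.Context, event.Event) is one of the valid fn signatures.
	handler := func(ctx context.Context, e event.Event) {
		log.Printf("received %s: %s", e.Type(), e.ID())
	}

	// Blocks until ctx is cancelled or the protocol fails.
	if err := c.StartReceiver(context.Background(), handler); err != nil {
		log.Fatal(err)
	}
}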
+var NewObserved = New diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go new file mode 100644 index 00000000..7bfebf35 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go @@ -0,0 +1,57 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + + "github.com/google/uuid" +) + +// EventDefaulter is the function signature for extensions that are able +// to perform event defaulting. +type EventDefaulter func(ctx context.Context, event event.Event) event.Event + +// DefaultIDToUUIDIfNotSet will inspect the provided event and assign a UUID to +// context.ID if it is found to be empty. +func DefaultIDToUUIDIfNotSet(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.ID() == "" { + event.Context = event.Context.Clone() + event.SetID(uuid.New().String()) + } + } + return event +} + +// DefaultTimeToNowIfNotSet will inspect the provided event and assign a new +// Timestamp to context.Time if it is found to be nil or zero. +func DefaultTimeToNowIfNotSet(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.Time().IsZero() { + event.Context = event.Context.Clone() + event.SetTime(time.Now()) + } + } + return event +} + +// NewDefaultDataContentTypeIfNotSet returns a defaulter that will inspect the +// provided event and set the provided content type if content type is found +// to be empty. +func NewDefaultDataContentTypeIfNotSet(contentType string) EventDefaulter { + return func(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.DataContentType() == "" { + event.SetDataContentType(contentType) + } + } + return event + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go new file mode 100644 index 00000000..e09962ce --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go @@ -0,0 +1,11 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package client holds the recommended entry points for interacting with the CloudEvents Golang SDK. The client wraps +a selected transport. The client adds validation and defaulting for sending events, and flexible receiver method +registration. For full details, read the `client.Client` documentation. +*/ +package client diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go new file mode 100644 index 00000000..672581b5 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go @@ -0,0 +1,45 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + cecontext "github.com/cloudevents/sdk-go/v2/context" + thttp "github.com/cloudevents/sdk-go/v2/protocol/http" + "go.uber.org/zap" + "net/http" +) + +func NewHTTPReceiveHandler(ctx context.Context, p *thttp.Protocol, fn interface{}) (*EventReceiver, error) { + invoker, err := newReceiveInvoker(fn, noopObservabilityService{}, nil, nil, false) //TODO(slinkydeveloper) maybe not nil? 
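A custom defaulter follows the same pattern as the built-ins above: clone the context before mutating, so a shared event is not modified in place. defaultSourceIfNotSet is a hypothetical example; it is wired in through client.New, which accepts client options directly (NewHTTP only takes HTTP options):

package main

import (
	"context"

	"github.com/cloudevents/sdk-go/v2/client"
	"github.com/cloudevents/sdk-go/v2/event"
	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

// defaultSourceIfNotSet fills in a fallback source, mirroring the built-in
// defaulters: clone the context before mutating it.
func defaultSourceIfNotSet(source string) client.EventDefaulter {
	return func(ctx context.Context, e event.Event) event.Event {
		if e.Context != nil && e.Source() == "" {
			e.Context = e.Context.Clone()
			e.SetSource(source)
		}
		return e
	}
}

func main() {
	p, err := cehttp.New()
	if err != nil {
		panic(err)
	}
	c, err := client.New(p, client.WithEventDefaulter(defaultSourceIfNotSet("example/service")))
	if err != nil {
		panic(err)
	}
	_ = c // ready to Send/StartReceiver as usual
}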
+ if err != nil { + return nil, err + } + + return &EventReceiver{ + p: p, + invoker: invoker, + }, nil +} + +type EventReceiver struct { + p *thttp.Protocol + invoker Invoker +} + +func (r *EventReceiver) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // Prepare to handle the message if there's one (context cancellation will ensure this closes) + go func() { + ctx := req.Context() + msg, respFn, err := r.p.Respond(ctx) + if err != nil { + cecontext.LoggerFrom(context.TODO()).Debugw("failed to call Respond", zap.Error(err)) + } else if err := r.invoker.Invoke(ctx, msg, respFn); err != nil { + cecontext.LoggerFrom(context.TODO()).Debugw("failed to call Invoke", zap.Error(err)) + } + }() + r.p.ServeHTTP(rw, req) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go new file mode 100644 index 00000000..a3080b00 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go @@ -0,0 +1,145 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +type Invoker interface { + Invoke(context.Context, binding.Message, protocol.ResponseFn) error + IsReceiver() bool + IsResponder() bool +} + +var _ Invoker = (*receiveInvoker)(nil) + +func newReceiveInvoker( + fn interface{}, + observabilityService ObservabilityService, + inboundContextDecorators []func(context.Context, binding.Message) context.Context, + fns []EventDefaulter, + ackMalformedEvent bool, +) (Invoker, error) { + r := &receiveInvoker{ + eventDefaulterFns: fns, + observabilityService: observabilityService, + inboundContextDecorators: inboundContextDecorators, + ackMalformedEvent: ackMalformedEvent, + } + + if fn, err := receiver(fn); err != nil { + return nil, err + } else { + r.fn = fn + } + + return r, nil +} + +type receiveInvoker struct { + fn *receiverFn + observabilityService ObservabilityService + eventDefaulterFns []EventDefaulter + inboundContextDecorators []func(context.Context, binding.Message) context.Context + ackMalformedEvent bool +} + +func (r *receiveInvoker) Invoke(ctx context.Context, m binding.Message, respFn protocol.ResponseFn) (err error) { + defer func() { + err = m.Finish(err) + }() + + var respMsg binding.Message + var result protocol.Result + + e, eventErr := binding.ToEvent(ctx, m) + switch { + case eventErr != nil && r.fn.hasEventIn: + r.observabilityService.RecordReceivedMalformedEvent(ctx, eventErr) + return respFn(ctx, nil, protocol.NewReceipt(r.ackMalformedEvent, "failed to convert Message to Event: %w", eventErr)) + case r.fn != nil: + // Check if event is valid before invoking the receiver function + if e != nil { + if validationErr := e.Validate(); validationErr != nil { + r.observabilityService.RecordReceivedMalformedEvent(ctx, validationErr) + return respFn(ctx, nil, protocol.NewReceipt(r.ackMalformedEvent, "validation error in incoming event: %w", validationErr)) + } + } + + // Let's invoke the receiver fn + var resp *event.Event + resp, result = func() (resp *event.Event, result protocol.Result) { + defer func() { + if r := recover(); r != nil { + result = fmt.Errorf("call to Invoker.Invoke(...) 
has panicked: %v", r) + cecontext.LoggerFrom(ctx).Error(result) + } + }() + ctx = computeInboundContext(m, ctx, r.inboundContextDecorators) + + var cb func(error) + ctx, cb = r.observabilityService.RecordCallingInvoker(ctx, e) + + resp, result = r.fn.invoke(ctx, e) + defer cb(result) + return + }() + + if respFn == nil { + break + } + + // Apply the defaulter chain to the outgoing event. + if resp != nil && len(r.eventDefaulterFns) > 0 { + for _, fn := range r.eventDefaulterFns { + *resp = fn(ctx, *resp) + } + // Validate the event conforms to the CloudEvents Spec. + if vErr := resp.Validate(); vErr != nil { + cecontext.LoggerFrom(ctx).Errorf("cloudevent validation failed on response event: %v", vErr) + } + } + + // because binding.Message is an interface, casting a nil resp + // here would make future comparisons to nil false + if resp != nil { + respMsg = (*binding.EventMessage)(resp) + } + } + + if respFn == nil { + // let the protocol ACK based on the result + return result + } + + return respFn(ctx, respMsg, result) +} + +func (r *receiveInvoker) IsReceiver() bool { + return !r.fn.hasEventOut +} + +func (r *receiveInvoker) IsResponder() bool { + return r.fn.hasEventOut +} + +func computeInboundContext(message binding.Message, fallback context.Context, inboundContextDecorators []func(context.Context, binding.Message) context.Context) context.Context { + result := fallback + if mctx, ok := message.(binding.MessageContext); ok { + result = cecontext.ValuesDelegating(mctx.Context(), fallback) + } + for _, f := range inboundContextDecorators { + result = f(result, message) + } + return result +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go new file mode 100644 index 00000000..75005d3b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go @@ -0,0 +1,54 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/event" +) + +// ObservabilityService is an interface users can implement to record metrics, create tracing spans, and plug other observability tools in the Client +type ObservabilityService interface { + // InboundContextDecorators is a method that returns the InboundContextDecorators that must be mounted in the Client to properly propagate some tracing informations. + InboundContextDecorators() []func(context.Context, binding.Message) context.Context + + // RecordReceivedMalformedEvent is invoked when an event was received but it's malformed or invalid. + RecordReceivedMalformedEvent(ctx context.Context, err error) + // RecordCallingInvoker is invoked before the user function is invoked. + // The returned callback will be invoked after the user finishes to process the event with the eventual processing error + // The error provided to the callback could be both a processing error, or a result + RecordCallingInvoker(ctx context.Context, event *event.Event) (context.Context, func(errOrResult error)) + // RecordSendingEvent is invoked before the event is sent. + // The returned callback will be invoked when the response is received + // The error provided to the callback could be both a processing error, or a result + RecordSendingEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error)) + + // RecordRequestEvent is invoked before the event is requested. 
+ // The returned callback will be invoked when the response is received + RecordRequestEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error, event *event.Event)) +} + +type noopObservabilityService struct{} + +func (n noopObservabilityService) InboundContextDecorators() []func(context.Context, binding.Message) context.Context { + return nil +} + +func (n noopObservabilityService) RecordReceivedMalformedEvent(ctx context.Context, err error) {} + +func (n noopObservabilityService) RecordCallingInvoker(ctx context.Context, event *event.Event) (context.Context, func(errOrResult error)) { + return ctx, func(errOrResult error) {} +} + +func (n noopObservabilityService) RecordSendingEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error)) { + return ctx, func(errOrResult error) {} +} + +func (n noopObservabilityService) RecordRequestEvent(ctx context.Context, e event.Event) (context.Context, func(errOrResult error, event *event.Event)) { + return ctx, func(errOrResult error, event *event.Event) {} +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go new file mode 100644 index 00000000..44394be3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go @@ -0,0 +1,141 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Option is the function signature required to be considered an client.Option. +type Option func(interface{}) error + +// WithEventDefaulter adds an event defaulter to the end of the defaulter chain. +func WithEventDefaulter(fn EventDefaulter) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + if fn == nil { + return fmt.Errorf("client option was given an nil event defaulter") + } + c.eventDefaulterFns = append(c.eventDefaulterFns, fn) + } + return nil + } +} + +func WithForceBinary() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.outboundContextDecorators = append(c.outboundContextDecorators, binding.WithForceBinary) + } + return nil + } +} + +func WithForceStructured() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.outboundContextDecorators = append(c.outboundContextDecorators, binding.WithForceStructured) + } + return nil + } +} + +// WithUUIDs adds DefaultIDToUUIDIfNotSet event defaulter to the end of the +// defaulter chain. +func WithUUIDs() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultIDToUUIDIfNotSet) + } + return nil + } +} + +// WithTimeNow adds DefaultTimeToNowIfNotSet event defaulter to the end of the +// defaulter chain. +func WithTimeNow() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultTimeToNowIfNotSet) + } + return nil + } +} + +// WithTracePropagation enables trace propagation via the distributed tracing +// extension. +// Deprecated: this is now noop and will be removed in future releases. 
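A minimal custom ObservabilityService might look like the sketch below (the logObservability type is hypothetical; a real implementation would emit metrics or spans instead of log lines):

package main

import (
	"context"
	"log"
	"time"

	"github.com/cloudevents/sdk-go/v2/binding"
	"github.com/cloudevents/sdk-go/v2/client"
	"github.com/cloudevents/sdk-go/v2/event"
	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

// logObservability logs timings for each hook of the interface above.
type logObservability struct{}

func (logObservability) InboundContextDecorators() []func(context.Context, binding.Message) context.Context {
	return nil
}

func (logObservability) RecordReceivedMalformedEvent(ctx context.Context, err error) {
	log.Printf("malformed event: %v", err)
}

func (logObservability) RecordCallingInvoker(ctx context.Context, e *event.Event) (context.Context, func(error)) {
	id := "unknown"
	if e != nil { // e can be nil when the inbound message was malformed
		id = e.ID()
	}
	start := time.Now()
	return ctx, func(errOrResult error) {
		log.Printf("handled %s in %v (result: %v)", id, time.Since(start), errOrResult)
	}
}

func (logObservability) RecordSendingEvent(ctx context.Context, e event.Event) (context.Context, func(error)) {
	start := time.Now()
	return ctx, func(errOrResult error) {
		log.Printf("sent %s in %v (result: %v)", e.ID(), time.Since(start), errOrResult)
	}
}

func (logObservability) RecordRequestEvent(ctx context.Context, e event.Event) (context.Context, func(error, *event.Event)) {
	start := time.Now()
	return ctx, func(errOrResult error, resp *event.Event) {
		log.Printf("request %s took %v (got response: %v, result: %v)", e.ID(), time.Since(start), resp != nil, errOrResult)
	}
}

func main() {
	p, err := cehttp.New()
	if err != nil {
		log.Fatal(err)
	}
	c, err := client.New(p, client.WithObservabilityService(logObservability{}))
	if err != nil {
		log.Fatal(err)
	}
	_ = c // Send/Request/StartReceiver now report through logObservability.
}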
+// Don't use the distributed tracing extension to propagate traces:
+// https://github.com/cloudevents/spec/blob/v1.0.1/extensions/distributed-tracing.md#using-the-distributed-tracing-extension
+func WithTracePropagation() Option {
+	return func(i interface{}) error {
+		return nil
+	}
+}
+
+// WithPollGoroutines configures how many goroutines should be used to
+// poll the Receiver/Responder/Protocol implementations.
+// Default value is GOMAXPROCS
+func WithPollGoroutines(pollGoroutines int) Option {
+	return func(i interface{}) error {
+		if c, ok := i.(*ceClient); ok {
+			c.pollGoroutines = pollGoroutines
+		}
+		return nil
+	}
+}
+
+// WithObservabilityService configures the observability service to use
+// to record traces and metrics
+func WithObservabilityService(service ObservabilityService) Option {
+	return func(i interface{}) error {
+		if c, ok := i.(*ceClient); ok {
+			c.observabilityService = service
+			c.inboundContextDecorators = append(c.inboundContextDecorators, service.InboundContextDecorators()...)
+		}
+		return nil
+	}
+}
+
+// WithInboundContextDecorator configures a new inbound context decorator.
+// Inbound context decorators are invoked to derive additional information from the binding.Message
+// and propagate that information in the context passed to the event receiver.
+func WithInboundContextDecorator(dec func(context.Context, binding.Message) context.Context) Option {
+	return func(i interface{}) error {
+		if c, ok := i.(*ceClient); ok {
+			c.inboundContextDecorators = append(c.inboundContextDecorators, dec)
+		}
+		return nil
+	}
+}
+
+// WithBlockingCallback makes the callback passed into StartReceiver execute as a blocking call,
+// i.e. in each poll goroutine, the next event will not be received until the callback on the current event completes.
+// To make event processing serialized (no concurrency), use this option along with WithPollGoroutines(1)
+func WithBlockingCallback() Option {
+	return func(i interface{}) error {
+		if c, ok := i.(*ceClient); ok {
+			c.blockingCallback = true
+		}
+		return nil
+	}
+}
+
+// WithAckMalformedEvent causes malformed events received within StartReceiver to be acknowledged
+// rather than being permanently unacknowledged. This can be useful when a protocol does not
+// provide a responder implementation and would otherwise cause the receiver to be partially or
+// fully stuck.
+func WithAckMalformedEvent() Option {
+	return func(i interface{}) error {
+		if c, ok := i.(*ceClient); ok {
+			c.ackMalformedEvent = true
+		}
+		return nil
+	}
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go
new file mode 100644
index 00000000..2cc0e649
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go
@@ -0,0 +1,193 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package client
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+
+	"github.com/cloudevents/sdk-go/v2/event"
+	"github.com/cloudevents/sdk-go/v2/protocol"
+)
+
+// ReceiveFull is the signature of a fn to be invoked for incoming cloudevents.
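Combining the two options called out above gives strictly serialized event handling; a short sketch:

package main

import (
	"log"

	"github.com/cloudevents/sdk-go/v2/client"
	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

func main() {
	p, err := cehttp.New()
	if err != nil {
		log.Fatal(err)
	}
	// One poll goroutine + blocking callbacks => at most one handler runs at a time.
	c, err := client.New(p, client.WithPollGoroutines(1), client.WithBlockingCallback())
	if err != nil {
		log.Fatal(err)
	}
	_ = c // StartReceiver as usual; events are now processed one by one.
}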
+type ReceiveFull func(context.Context, event.Event) protocol.Result + +type receiverFn struct { + numIn int + numOut int + fnValue reflect.Value + + hasContextIn bool + hasEventIn bool + + hasEventOut bool + hasResultOut bool +} + +const ( + inParamUsage = "expected a function taking either no parameters, one or more of (context.Context, event.Event) ordered" + outParamUsage = "expected a function returning one or mode of (*event.Event, protocol.Result) ordered" +) + +var ( + contextType = reflect.TypeOf((*context.Context)(nil)).Elem() + eventType = reflect.TypeOf((*event.Event)(nil)).Elem() + eventPtrType = reflect.TypeOf((*event.Event)(nil)) // want the ptr type + resultType = reflect.TypeOf((*protocol.Result)(nil)).Elem() +) + +// receiver creates a receiverFn wrapper class that is used by the client to +// validate and invoke the provided function. +// Valid fn signatures are: +// * func() +// * func() protocol.Result +// * func(context.Context) +// * func(context.Context) protocol.Result +// * func(event.Event) +// * func(event.Event) transport.Result +// * func(context.Context, event.Event) +// * func(context.Context, event.Event) protocol.Result +// * func(event.Event) *event.Event +// * func(event.Event) (*event.Event, protocol.Result) +// * func(context.Context, event.Event) *event.Event +// * func(context.Context, event.Event) (*event.Event, protocol.Result) +func receiver(fn interface{}) (*receiverFn, error) { + fnType := reflect.TypeOf(fn) + if fnType.Kind() != reflect.Func { + return nil, errors.New("must pass a function to handle events") + } + + r := &receiverFn{ + fnValue: reflect.ValueOf(fn), + numIn: fnType.NumIn(), + numOut: fnType.NumOut(), + } + + if err := r.validate(fnType); err != nil { + return nil, err + } + + return r, nil +} + +func (r *receiverFn) invoke(ctx context.Context, e *event.Event) (*event.Event, protocol.Result) { + args := make([]reflect.Value, 0, r.numIn) + + if r.numIn > 0 { + if r.hasContextIn { + args = append(args, reflect.ValueOf(ctx)) + } + if r.hasEventIn { + args = append(args, reflect.ValueOf(*e)) + } + } + v := r.fnValue.Call(args) + var respOut protocol.Result + var eOut *event.Event + if r.numOut > 0 { + i := 0 + if r.hasEventOut { + if eo, ok := v[i].Interface().(*event.Event); ok { + eOut = eo + } + i++ // <-- note, need to inc i. + } + if r.hasResultOut { + if resp, ok := v[i].Interface().(protocol.Result); ok { + respOut = resp + } + } + } + return eOut, respOut +} + +// Verifies that the inputs to a function have a valid signature +// Valid input is to be [0, all] of +// context.Context, event.Event in this order. 
+func (r *receiverFn) validateInParamSignature(fnType reflect.Type) error { + r.hasContextIn = false + r.hasEventIn = false + + switch fnType.NumIn() { + case 2: + // has to be (context.Context, event.Event) + if !eventType.ConvertibleTo(fnType.In(1)) { + return fmt.Errorf("%s; cannot convert parameter 2 to %s from event.Event", inParamUsage, fnType.In(1)) + } else { + r.hasEventIn = true + } + fallthrough + case 1: + if !contextType.ConvertibleTo(fnType.In(0)) { + if !eventType.ConvertibleTo(fnType.In(0)) { + return fmt.Errorf("%s; cannot convert parameter 1 to %s from context.Context or event.Event", inParamUsage, fnType.In(0)) + } else if r.hasEventIn { + return fmt.Errorf("%s; duplicate parameter of type event.Event", inParamUsage) + } else { + r.hasEventIn = true + } + } else { + r.hasContextIn = true + } + fallthrough + case 0: + return nil + + default: + return fmt.Errorf("%s; function has too many parameters (%d)", inParamUsage, fnType.NumIn()) + } +} + +// Verifies that the outputs of a function have a valid signature +// Valid output signatures to be [0, all] of +// *event.Event, transport.Result in this order +func (r *receiverFn) validateOutParamSignature(fnType reflect.Type) error { + r.hasEventOut = false + r.hasResultOut = false + + switch fnType.NumOut() { + case 2: + // has to be (*event.Event, transport.Result) + if !fnType.Out(1).ConvertibleTo(resultType) { + return fmt.Errorf("%s; cannot convert parameter 2 from %s to event.Response", outParamUsage, fnType.Out(1)) + } else { + r.hasResultOut = true + } + fallthrough + case 1: + if !fnType.Out(0).ConvertibleTo(resultType) { + if !fnType.Out(0).ConvertibleTo(eventPtrType) { + return fmt.Errorf("%s; cannot convert parameter 1 from %s to *event.Event or transport.Result", outParamUsage, fnType.Out(0)) + } else { + r.hasEventOut = true + } + } else if r.hasResultOut { + return fmt.Errorf("%s; duplicate parameter of type event.Response", outParamUsage) + } else { + r.hasResultOut = true + } + fallthrough + case 0: + return nil + default: + return fmt.Errorf("%s; function has too many return types (%d)", outParamUsage, fnType.NumOut()) + } +} + +// validateReceiverFn validates that a function has the right number of in and +// out params and that they are of allowed types. +func (r *receiverFn) validate(fnType reflect.Type) error { + if err := r.validateInParamSignature(fnType); err != nil { + return err + } + if err := r.validateOutParamSignature(fnType); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/context.go b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go new file mode 100644 index 00000000..fc9ef031 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go @@ -0,0 +1,110 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + "net/url" + "time" +) + +// Opaque key type used to store target +type targetKeyType struct{} + +var targetKey = targetKeyType{} + +// WithTarget returns back a new context with the given target. Target is intended to be transport dependent. +// For http transport, `target` should be a full URL and will be injected into the outbound http request. +func WithTarget(ctx context.Context, target string) context.Context { + return context.WithValue(ctx, targetKey, target) +} + +// TargetFrom looks in the given context and returns `target` as a parsed url if found and valid, otherwise nil. 
+func TargetFrom(ctx context.Context) *url.URL {
+	c := ctx.Value(targetKey)
+	if c != nil {
+		if s, ok := c.(string); ok && s != "" {
+			if target, err := url.Parse(s); err == nil {
+				return target
+			}
+		}
+	}
+	return nil
+}
+
+// Opaque key type used to store topic
+type topicKeyType struct{}
+
+var topicKey = topicKeyType{}
+
+// WithTopic returns back a new context with the given topic. Topic is intended to be transport dependent.
+// For pubsub transport, `topic` should be a Pub/Sub Topic ID.
+func WithTopic(ctx context.Context, topic string) context.Context {
+	return context.WithValue(ctx, topicKey, topic)
+}
+
+// TopicFrom looks in the given context and returns `topic` as a string if found and valid, otherwise "".
+func TopicFrom(ctx context.Context) string {
+	c := ctx.Value(topicKey)
+	if c != nil {
+		if s, ok := c.(string); ok {
+			return s
+		}
+	}
+	return ""
+}
+
+// Opaque key type used to store retry parameters
+type retriesKeyType struct{}
+
+var retriesKey = retriesKeyType{}
+
+// WithRetriesConstantBackoff returns back a new context with retries parameters using the constant backoff strategy.
+// MaxTries is the maximum number of retries and delay is the time interval between retries
+func WithRetriesConstantBackoff(ctx context.Context, delay time.Duration, maxTries int) context.Context {
+	return WithRetryParams(ctx, &RetryParams{
+		Strategy: BackoffStrategyConstant,
+		Period:   delay,
+		MaxTries: maxTries,
+	})
+}
+
+// WithRetriesLinearBackoff returns back a new context with retries parameters using the linear backoff strategy.
+// MaxTries is the maximum number of retries and delay*tries is the time interval between retries
+func WithRetriesLinearBackoff(ctx context.Context, delay time.Duration, maxTries int) context.Context {
+	return WithRetryParams(ctx, &RetryParams{
+		Strategy: BackoffStrategyLinear,
+		Period:   delay,
+		MaxTries: maxTries,
+	})
+}
+
+// WithRetriesExponentialBackoff returns back a new context with retries parameters using the exponential backoff strategy.
+// MaxTries is the maximum number of retries and period is the amount of time to wait, used as `period * 2^retries`.
+func WithRetriesExponentialBackoff(ctx context.Context, period time.Duration, maxTries int) context.Context {
+	return WithRetryParams(ctx, &RetryParams{
+		Strategy: BackoffStrategyExponential,
+		Period:   period,
+		MaxTries: maxTries,
+	})
+}
+
+// WithRetryParams returns back a new context with retries parameters.
+func WithRetryParams(ctx context.Context, rp *RetryParams) context.Context {
+	return context.WithValue(ctx, retriesKey, rp)
+}
+
+// RetriesFrom looks in the given context and returns the retries parameters if found.
+// Otherwise returns the default retries configuration (i.e. no retries).
+func RetriesFrom(ctx context.Context) *RetryParams {
+	c := ctx.Value(retriesKey)
+	if c != nil {
+		if s, ok := c.(*RetryParams); ok {
+			return s
+		}
+	}
+	return &DefaultRetryParams
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go
new file mode 100644
index 00000000..434a4da7
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go
@@ -0,0 +1,25 @@
+package context
+
+import "context"
+
+type valuesDelegating struct {
+	context.Context
+	parent context.Context
+}
+
+// ValuesDelegating wraps a child and parent context. It will perform Value()
+// lookups first on the child, and then fall back to the parent. All other calls
+// go solely to the child context.
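Putting the context decorators together on the sending side (a sketch; the URL and retry numbers are placeholders):

package main

import (
	"context"
	"time"

	cecontext "github.com/cloudevents/sdk-go/v2/context"
)

func main() {
	// Target and retry parameters travel in the context; the sender reads
	// them when delivering the event.
	ctx := cecontext.WithTarget(context.Background(), "http://localhost:8080/")
	ctx = cecontext.WithRetriesExponentialBackoff(ctx, 50*time.Millisecond, 5)
	_ = ctx // pass to client.Send / client.Request
}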
+func ValuesDelegating(child, parent context.Context) context.Context { + return &valuesDelegating{ + Context: child, + parent: parent, + } +} + +func (c *valuesDelegating) Value(key interface{}) interface{} { + if val := c.Context.Value(key); val != nil { + return val + } + return c.parent.Value(key) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go new file mode 100644 index 00000000..0b2dcaf7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go @@ -0,0 +1,10 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package context holds the last resort overrides and fyi objects that can be passed to clients and transports added to +context.Context objects. +*/ +package context diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go new file mode 100644 index 00000000..b3087a79 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go @@ -0,0 +1,48 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + + "go.uber.org/zap" +) + +// Opaque key type used to store logger +type loggerKeyType struct{} + +var loggerKey = loggerKeyType{} + +// fallbackLogger is the logger is used when there is no logger attached to the context. +var fallbackLogger *zap.SugaredLogger + +func init() { + if logger, err := zap.NewProduction(); err != nil { + // We failed to create a fallback logger. + fallbackLogger = zap.NewNop().Sugar() + } else { + fallbackLogger = logger.Named("fallback").Sugar() + } +} + +// WithLogger returns a new context with the logger injected into the given context. +func WithLogger(ctx context.Context, logger *zap.SugaredLogger) context.Context { + if logger == nil { + return context.WithValue(ctx, loggerKey, fallbackLogger) + } + return context.WithValue(ctx, loggerKey, logger) +} + +// LoggerFrom returns the logger stored in context. 
+func LoggerFrom(ctx context.Context) *zap.SugaredLogger {
+	l := ctx.Value(loggerKey)
+	if l != nil {
+		if logger, ok := l.(*zap.SugaredLogger); ok {
+			return logger
+		}
+	}
+	return fallbackLogger
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go
new file mode 100644
index 00000000..ec17df72
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go
@@ -0,0 +1,76 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package context
+
+import (
+	"context"
+	"errors"
+	"math"
+	"time"
+)
+
+type BackoffStrategy string
+
+const (
+	BackoffStrategyNone        = "none"
+	BackoffStrategyConstant    = "constant"
+	BackoffStrategyLinear      = "linear"
+	BackoffStrategyExponential = "exponential"
+)
+
+var DefaultRetryParams = RetryParams{Strategy: BackoffStrategyNone}
+
+// RetryParams holds parameters applied to retries
+type RetryParams struct {
+	// Strategy is the backoff strategy to apply between retries
+	Strategy BackoffStrategy
+
+	// MaxTries is the maximum number of times to retry a request before giving up
+	MaxTries int
+
+	// Period is
+	// - for the none strategy: no delay
+	// - for the constant strategy: the delay interval between retries
+	// - for the linear strategy: interval between retries = Period * retries
+	// - for the exponential strategy: interval between retries = Period * 2^retries
+	Period time.Duration
+}
+
+// BackoffFor returns the time duration that should be used for the
+// current try count.
+// `tries` is assumed to be the number of times the caller has already retried.
+func (r *RetryParams) BackoffFor(tries int) time.Duration {
+	switch r.Strategy {
+	case BackoffStrategyConstant:
+		return r.Period
+	case BackoffStrategyLinear:
+		return r.Period * time.Duration(tries)
+	case BackoffStrategyExponential:
+		exp := math.Exp2(float64(tries))
+		return r.Period * time.Duration(exp)
+	case BackoffStrategyNone:
+		fallthrough // default
+	default:
+		return r.Period
+	}
+}
+
+// Backoff is a blocking call to wait for the correct amount of time for the retry.
+// `tries` is assumed to be the number of times the caller has already retried.
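For instance, with Period = 100ms and the exponential strategy, BackoffFor yields Period * 2^tries: 200ms, 400ms, 800ms, 1.6s, 3.2s for tries 1 through 5. A quick check (sketch):

package main

import (
	"fmt"
	"time"

	cecontext "github.com/cloudevents/sdk-go/v2/context"
)

func main() {
	rp := cecontext.RetryParams{
		Strategy: cecontext.BackoffStrategyExponential,
		Period:   100 * time.Millisecond,
		MaxTries: 5,
	}
	for try := 1; try <= rp.MaxTries; try++ {
		fmt.Println(try, rp.BackoffFor(try)) // 200ms, 400ms, 800ms, 1.6s, 3.2s
	}
}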
+func (r *RetryParams) Backoff(ctx context.Context, tries int) error { + if tries > r.MaxTries { + return errors.New("too many retries") + } + ticker := time.NewTicker(r.BackoffFor(tries)) + select { + case <-ctx.Done(): + ticker.Stop() + return errors.New("context has been cancelled") + case <-ticker.C: + ticker.Stop() + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go new file mode 100644 index 00000000..a49522f8 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go @@ -0,0 +1,47 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +const ( + TextPlain = "text/plain" + TextJSON = "text/json" + ApplicationJSON = "application/json" + ApplicationXML = "application/xml" + ApplicationCloudEventsJSON = "application/cloudevents+json" + ApplicationCloudEventsBatchJSON = "application/cloudevents-batch+json" +) + +// StringOfApplicationJSON returns a string pointer to "application/json" +func StringOfApplicationJSON() *string { + a := ApplicationJSON + return &a +} + +// StringOfApplicationXML returns a string pointer to "application/xml" +func StringOfApplicationXML() *string { + a := ApplicationXML + return &a +} + +// StringOfTextPlain returns a string pointer to "text/plain" +func StringOfTextPlain() *string { + a := TextPlain + return &a +} + +// StringOfApplicationCloudEventsJSON returns a string pointer to +// "application/cloudevents+json" +func StringOfApplicationCloudEventsJSON() *string { + a := ApplicationCloudEventsJSON + return &a +} + +// StringOfApplicationCloudEventsBatchJSON returns a string pointer to +// "application/cloudevents-batch+json" +func StringOfApplicationCloudEventsBatchJSON() *string { + a := ApplicationCloudEventsBatchJSON + return &a +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go new file mode 100644 index 00000000..cf215269 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go @@ -0,0 +1,16 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +const ( + Base64 = "base64" +) + +// StringOfBase64 returns a string pointer to "Base64" +func StringOfBase64() *string { + a := Base64 + return &a +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go new file mode 100644 index 00000000..3e077740 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go @@ -0,0 +1,78 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package datacodec + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/event/datacodec/json" + "github.com/cloudevents/sdk-go/v2/event/datacodec/text" + "github.com/cloudevents/sdk-go/v2/event/datacodec/xml" +) + +// Decoder is the expected function signature for decoding `in` to `out`. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. +type Decoder func(ctx context.Context, in []byte, out interface{}) error + +// Encoder is the expected function signature for encoding `in` to bytes. +// Returns an error if the encoder has an issue encoding `in`. 
+type Encoder func(ctx context.Context, in interface{}) ([]byte, error) + +var decoder map[string]Decoder +var encoder map[string]Encoder + +func init() { + decoder = make(map[string]Decoder, 10) + encoder = make(map[string]Encoder, 10) + + AddDecoder("", json.Decode) + AddDecoder("application/json", json.Decode) + AddDecoder("text/json", json.Decode) + AddDecoder("application/xml", xml.Decode) + AddDecoder("text/xml", xml.Decode) + AddDecoder("text/plain", text.Decode) + + AddEncoder("", json.Encode) + AddEncoder("application/json", json.Encode) + AddEncoder("text/json", json.Encode) + AddEncoder("application/xml", xml.Encode) + AddEncoder("text/xml", xml.Encode) + AddEncoder("text/plain", text.Encode) +} + +// AddDecoder registers a decoder for a given content type. The codecs will use +// these to decode the data payload from a cloudevent.Event object. +func AddDecoder(contentType string, fn Decoder) { + decoder[contentType] = fn +} + +// AddEncoder registers an encoder for a given content type. The codecs will +// use these to encode the data payload for a cloudevent.Event object. +func AddEncoder(contentType string, fn Encoder) { + encoder[contentType] = fn +} + +// Decode looks up and invokes the decoder registered for the given content +// type. An error is returned if no decoder is registered for the given +// content type. +func Decode(ctx context.Context, contentType string, in []byte, out interface{}) error { + if fn, ok := decoder[contentType]; ok { + return fn(ctx, in, out) + } + return fmt.Errorf("[decode] unsupported content type: %q", contentType) +} + +// Encode looks up and invokes the encoder registered for the given content +// type. An error is returned if no encoder is registered for the given +// content type. +func Encode(ctx context.Context, contentType string, in interface{}) ([]byte, error) { + if fn, ok := encoder[contentType]; ok { + return fn(ctx, in) + } + return nil, fmt.Errorf("[encode] unsupported content type: %q", contentType) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go new file mode 100644 index 00000000..b681af88 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go @@ -0,0 +1,10 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package datacodec holds the data codec registry and adds known encoders and decoders supporting media types such as +`application/json` and `application/xml`. +*/ +package datacodec diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go new file mode 100644 index 00000000..734ade59 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go @@ -0,0 +1,56 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package json + +import ( + "context" + "encoding/json" + "fmt" + "reflect" +) + +// Decode takes `in` as []byte. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. 
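Registering a custom codec is just a matter of matching the two signatures above; "application/vnd.example+json" is a made-up media type used here for illustration:

package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/cloudevents/sdk-go/v2/event/datacodec"
)

func main() {
	// Register a decoder and encoder for a hypothetical media type,
	// delegating to encoding/json.
	datacodec.AddDecoder("application/vnd.example+json", func(ctx context.Context, in []byte, out interface{}) error {
		return json.Unmarshal(in, out)
	})
	datacodec.AddEncoder("application/vnd.example+json", func(ctx context.Context, in interface{}) ([]byte, error) {
		return json.Marshal(in)
	})

	b, err := datacodec.Encode(context.Background(), "application/vnd.example+json", map[string]int{"n": 1})
	fmt.Println(string(b), err) // {"n":1} <nil>
}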
+func Decode(ctx context.Context, in []byte, out interface{}) error { + if in == nil { + return nil + } + if out == nil { + return fmt.Errorf("out is nil") + } + + if err := json.Unmarshal(in, out); err != nil { + return fmt.Errorf("[json] found bytes \"%s\", but failed to unmarshal: %s", string(in), err.Error()) + } + return nil +} + +// Encode attempts to json.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or json.Marshal errors. +func Encode(ctx context.Context, in interface{}) ([]byte, error) { + if in == nil { + return nil, nil + } + + it := reflect.TypeOf(in) + switch it.Kind() { + case reflect.Slice: + if it.Elem().Kind() == reflect.Uint8 { + + if b, ok := in.([]byte); ok && len(b) > 0 { + // check to see if it is a pre-encoded byte string. + if b[0] == byte('"') || b[0] == byte('{') || b[0] == byte('[') { + return b, nil + } + } + + } + } + + return json.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go new file mode 100644 index 00000000..33e1323c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package json holds the encoder/decoder implementation for `application/json`. +*/ +package json diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go new file mode 100644 index 00000000..761a1011 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go @@ -0,0 +1,30 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package text + +import ( + "context" + "fmt" +) + +// Text codec converts []byte or string to string and vice-versa. + +func Decode(_ context.Context, in []byte, out interface{}) error { + p, _ := out.(*string) + if p == nil { + return fmt.Errorf("text.Decode out: want *string, got %T", out) + } + *p = string(in) + return nil +} + +func Encode(_ context.Context, in interface{}) ([]byte, error) { + s, ok := in.(string) + if !ok { + return nil, fmt.Errorf("text.Encode in: want string, got %T", in) + } + return []byte(s), nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go new file mode 100644 index 00000000..af10577a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package text holds the encoder/decoder implementation for `text/plain`. +*/ +package text diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go new file mode 100644 index 00000000..de68ec3d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go @@ -0,0 +1,40 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package xml + +import ( + "context" + "encoding/xml" + "fmt" +) + +// Decode takes `in` as []byte. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. 
+func Decode(ctx context.Context, in []byte, out interface{}) error { + if in == nil { + return nil + } + + if err := xml.Unmarshal(in, out); err != nil { + return fmt.Errorf("[xml] found bytes, but failed to unmarshal: %s %s", err.Error(), string(in)) + } + return nil +} + +// Encode attempts to xml.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or xml.Marshal errors. +func Encode(ctx context.Context, in interface{}) ([]byte, error) { + if b, ok := in.([]byte); ok { + // check to see if it is a pre-encoded byte string. + if len(b) > 0 && b[0] == byte('"') { + return b, nil + } + } + + return xml.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go new file mode 100644 index 00000000..c8d73213 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package xml holds the encoder/decoder implementation for `application/xml`. +*/ +package xml diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go new file mode 100644 index 00000000..31c22ce6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package event provides primitives to work with CloudEvents specification: https://github.com/cloudevents/spec. +*/ +package event diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go new file mode 100644 index 00000000..52495f9a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go @@ -0,0 +1,125 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "bytes" + "encoding/json" + "strings" +) + +// Event represents the canonical representation of a CloudEvent. +type Event struct { + Context EventContext + DataEncoded []byte + // DataBase64 indicates if the event, when serialized, represents + // the data field using the base64 encoding. + // In v0.3, this field is superseded by DataContentEncoding + DataBase64 bool + FieldErrors map[string]error +} + +const ( + defaultEventVersion = CloudEventsVersionV1 +) + +func (e *Event) fieldError(field string, err error) { + if e.FieldErrors == nil { + e.FieldErrors = make(map[string]error) + } + e.FieldErrors[field] = err +} + +func (e *Event) fieldOK(field string) { + if e.FieldErrors != nil { + delete(e.FieldErrors, field) + } +} + +// New returns a new Event, an optional version can be passed to change the +// default spec version from 1.0 to the provided version. +func New(version ...string) Event { + specVersion := defaultEventVersion + if len(version) >= 1 { + specVersion = version[0] + } + e := &Event{} + e.SetSpecVersion(specVersion) + return *e +} + +// ExtensionAs is deprecated: access extensions directly via the e.Extensions() map. +// Use functions in the types package to convert extension values. 
+// For example replace this: +// +// var i int +// err := e.ExtensionAs("foo", &i) +// +// With this: +// +// i, err := types.ToInteger(e.Extensions["foo"]) +func (e Event) ExtensionAs(name string, obj interface{}) error { + return e.Context.ExtensionAs(name, obj) +} + +// String returns a pretty-printed representation of the Event. +func (e Event) String() string { + b := strings.Builder{} + + b.WriteString(e.Context.String()) + + if e.DataEncoded != nil { + if e.DataBase64 { + b.WriteString("Data (binary),\n ") + } else { + b.WriteString("Data,\n ") + } + switch e.DataMediaType() { + case ApplicationJSON: + var prettyJSON bytes.Buffer + err := json.Indent(&prettyJSON, e.DataEncoded, " ", " ") + if err != nil { + b.Write(e.DataEncoded) + } else { + b.Write(prettyJSON.Bytes()) + } + default: + b.Write(e.DataEncoded) + } + b.WriteString("\n") + } + + return b.String() +} + +func (e Event) Clone() Event { + out := Event{} + out.Context = e.Context.Clone() + out.DataEncoded = cloneBytes(e.DataEncoded) + out.DataBase64 = e.DataBase64 + out.FieldErrors = e.cloneFieldErrors() + return out +} + +func cloneBytes(in []byte) []byte { + if in == nil { + return nil + } + out := make([]byte, len(in)) + copy(out, in) + return out +} + +func (e Event) cloneFieldErrors() map[string]error { + if e.FieldErrors == nil { + return nil + } + newFE := make(map[string]error, len(e.FieldErrors)) + for k, v := range e.FieldErrors { + newFE[k] = v + } + return newFE +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go new file mode 100644 index 00000000..8fc449ed --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go @@ -0,0 +1,118 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "context" + "encoding/base64" + "fmt" + "strconv" + + "github.com/cloudevents/sdk-go/v2/event/datacodec" +) + +// SetData encodes the given payload with the given content type. +// If the provided payload is a byte array, when marshalled to json it will be encoded as base64. +// If the provided payload is different from byte array, datacodec.Encode is invoked to attempt a +// marshalling to byte array. +func (e *Event) SetData(contentType string, obj interface{}) error { + e.SetDataContentType(contentType) + + if e.SpecVersion() != CloudEventsVersionV1 { + return e.legacySetData(obj) + } + + // Version 1.0 and above. + switch obj := obj.(type) { + case []byte: + e.DataEncoded = obj + e.DataBase64 = true + default: + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + e.DataEncoded = data + e.DataBase64 = false + } + + return nil +} + +// Deprecated: Delete when we do not have to support Spec v0.3. 
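+// legacySetData covers spec versions prior to 1.0, where base64 handling is
+// driven by the deprecated datacontentencoding attribute rather than by the
+// payload type.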
+func (e *Event) legacySetData(obj interface{}) error {
+	data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj)
+	if err != nil {
+		return err
+	}
+	if e.DeprecatedDataContentEncoding() == Base64 {
+		buf := make([]byte, base64.StdEncoding.EncodedLen(len(data)))
+		base64.StdEncoding.Encode(buf, data)
+		e.DataEncoded = buf
+	} else {
+		e.DataEncoded = data
+	}
+	e.DataBase64 = false
+	return nil
+}
+
+const (
+	quotes = `"'`
+)
+
+// Data returns the encoded data payload as-is.
+func (e Event) Data() []byte {
+	return e.DataEncoded
+}
+
+// DataAs attempts to populate the provided data object with the event payload.
+// obj should be a pointer type.
+func (e Event) DataAs(obj interface{}) error {
+	data := e.Data()
+
+	if len(data) == 0 {
+		// No data.
+		return nil
+	}
+
+	if e.SpecVersion() != CloudEventsVersionV1 {
+		var err error
+		if data, err = e.legacyConvertData(data); err != nil {
+			return err
+		}
+	}
+
+	return datacodec.Decode(context.Background(), e.DataMediaType(), data, obj)
+}
+
+func (e Event) legacyConvertData(data []byte) ([]byte, error) {
+	if e.Context.DeprecatedGetDataContentEncoding() == Base64 {
+		var bs []byte
+		// test to see if we need to unquote the data.
+		if data[0] == quotes[0] || data[0] == quotes[1] {
+			str, err := strconv.Unquote(string(data))
+			if err != nil {
+				return nil, err
+			}
+			bs = []byte(str)
+		} else {
+			bs = data
+		}
+
+		buf := make([]byte, base64.StdEncoding.DecodedLen(len(bs)))
+		n, err := base64.StdEncoding.Decode(buf, bs)
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode data from base64: %s", err.Error())
+		}
+		data = buf[:n]
+	}
+
+	return data, nil
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go
new file mode 100644
index 00000000..2809fed5
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go
@@ -0,0 +1,102 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package event
+
+import (
+	"time"
+)
+
+// EventReader is the interface for reading through an event from attributes.
+type EventReader interface {
+	// SpecVersion returns event.Context.GetSpecVersion().
+	SpecVersion() string
+	// Type returns event.Context.GetType().
+	Type() string
+	// Source returns event.Context.GetSource().
+	Source() string
+	// Subject returns event.Context.GetSubject().
+	Subject() string
+	// ID returns event.Context.GetID().
+	ID() string
+	// Time returns event.Context.GetTime().
+	Time() time.Time
+	// DataSchema returns event.Context.GetDataSchema().
+	DataSchema() string
+	// DataContentType returns event.Context.GetDataContentType().
+	DataContentType() string
+	// DataMediaType returns event.Context.GetDataMediaType().
+	DataMediaType() string
+	// DeprecatedDataContentEncoding returns event.Context.DeprecatedGetDataContentEncoding().
+	DeprecatedDataContentEncoding() string
+
+	// Extension Attributes
+
+	// Extensions returns the event.Context.GetExtensions().
+	// Extensions use the CloudEvents type system, details in package cloudevents/types.
+	Extensions() map[string]interface{}
+
+	// ExtensionAs returns event.Context.ExtensionAs(name, obj).
+	//
+	// DEPRECATED: Access extensions directly via the e.Extensions() map.
+	// Use functions in the types package to convert extension values.
+	// For example replace this:
+	//
+	//    var i int
+	//    err := e.ExtensionAs("foo", &i)
+	//
+	// With this:
+	//
+	//    i, err := types.ToInteger(e.Extensions["foo"])
+	//
+	ExtensionAs(string, interface{}) error
+
+	// Data Attribute
+
+	// Data returns the raw data buffer.
+	// If the event was encoded with base64 encoding, Data returns the already decoded
+	// byte array.
+	Data() []byte
+
+	// DataAs attempts to populate the provided data object with the event payload.
+	DataAs(interface{}) error
+}
+
+// EventWriter is the interface for writing through an event onto attributes.
+// If an error is returned by a sub-component, EventWriter caches the error
+// internally and exposes errors with a call to event.Validate().
+type EventWriter interface {
+	// Context Attributes
+
+	// SetSpecVersion performs event.Context.SetSpecVersion.
+	SetSpecVersion(string)
+	// SetType performs event.Context.SetType.
+	SetType(string)
+	// SetSource performs event.Context.SetSource.
+	SetSource(string)
+	// SetSubject performs event.Context.SetSubject.
+	SetSubject(string)
+	// SetID performs event.Context.SetID.
+	SetID(string)
+	// SetTime performs event.Context.SetTime.
+	SetTime(time.Time)
+	// SetDataSchema performs event.Context.SetDataSchema.
+	SetDataSchema(string)
+	// SetDataContentType performs event.Context.SetDataContentType.
+	SetDataContentType(string)
+	// SetDataContentEncoding performs event.Context.DeprecatedSetDataContentEncoding.
+	SetDataContentEncoding(string)
+
+	// Extension Attributes
+
+	// SetExtension performs event.Context.SetExtension.
+	SetExtension(string, interface{})
+
+	// SetData encodes the given payload with the given content type.
+	// If the provided payload is a byte array, when marshalled to json it will be encoded as base64.
+	// If the provided payload is different from byte array, datacodec.Encode is invoked to attempt a
+	// marshalling to byte array.
+	SetData(string, interface{}) error
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go
new file mode 100644
index 00000000..c5f2dc03
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go
@@ -0,0 +1,203 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package event
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"strings"
+
+	jsoniter "github.com/json-iterator/go"
+)
+
+// WriteJson writes the given event to the provided writer.
+// Note: this function assumes the input event is valid.
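+// A typical use is rendering an event to a buffer (illustrative sketch):
+//
+//	var buf bytes.Buffer
+//	if err := WriteJson(&event, &buf); err != nil {
+//		// handle the marshalling error
+//	}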
+func WriteJson(in *Event, writer io.Writer) error { + stream := jsoniter.ConfigFastest.BorrowStream(writer) + defer jsoniter.ConfigFastest.ReturnStream(stream) + stream.WriteObjectStart() + + var ext map[string]interface{} + var dct *string + var isBase64 bool + + // Write the context (without the extensions) + switch eventContext := in.Context.(type) { + case *EventContextV03: + // Set a bunch of variables we need later + ext = eventContext.Extensions + dct = eventContext.DataContentType + + stream.WriteObjectField("specversion") + stream.WriteString(CloudEventsVersionV03) + stream.WriteMore() + + stream.WriteObjectField("id") + stream.WriteString(eventContext.ID) + stream.WriteMore() + + stream.WriteObjectField("source") + stream.WriteString(eventContext.Source.String()) + stream.WriteMore() + + stream.WriteObjectField("type") + stream.WriteString(eventContext.Type) + + if eventContext.Subject != nil { + stream.WriteMore() + stream.WriteObjectField("subject") + stream.WriteString(*eventContext.Subject) + } + + if eventContext.DataContentEncoding != nil { + isBase64 = true + stream.WriteMore() + stream.WriteObjectField("datacontentencoding") + stream.WriteString(*eventContext.DataContentEncoding) + } + + if eventContext.DataContentType != nil { + stream.WriteMore() + stream.WriteObjectField("datacontenttype") + stream.WriteString(*eventContext.DataContentType) + } + + if eventContext.SchemaURL != nil { + stream.WriteMore() + stream.WriteObjectField("schemaurl") + stream.WriteString(eventContext.SchemaURL.String()) + } + + if eventContext.Time != nil { + stream.WriteMore() + stream.WriteObjectField("time") + stream.WriteString(eventContext.Time.String()) + } + case *EventContextV1: + // Set a bunch of variables we need later + ext = eventContext.Extensions + dct = eventContext.DataContentType + isBase64 = in.DataBase64 + + stream.WriteObjectField("specversion") + stream.WriteString(CloudEventsVersionV1) + stream.WriteMore() + + stream.WriteObjectField("id") + stream.WriteString(eventContext.ID) + stream.WriteMore() + + stream.WriteObjectField("source") + stream.WriteString(eventContext.Source.String()) + stream.WriteMore() + + stream.WriteObjectField("type") + stream.WriteString(eventContext.Type) + + if eventContext.Subject != nil { + stream.WriteMore() + stream.WriteObjectField("subject") + stream.WriteString(*eventContext.Subject) + } + + if eventContext.DataContentType != nil { + stream.WriteMore() + stream.WriteObjectField("datacontenttype") + stream.WriteString(*eventContext.DataContentType) + } + + if eventContext.DataSchema != nil { + stream.WriteMore() + stream.WriteObjectField("dataschema") + stream.WriteString(eventContext.DataSchema.String()) + } + + if eventContext.Time != nil { + stream.WriteMore() + stream.WriteObjectField("time") + stream.WriteString(eventContext.Time.String()) + } + default: + return fmt.Errorf("missing event context") + } + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event attributes: %w", stream.Error) + } + + // Let's write the body + if in.DataEncoded != nil { + stream.WriteMore() + + // We need to figure out the media type first + var mediaType string + if dct == nil { + mediaType = ApplicationJSON + } else { + // This code is required to extract the media type from the full content type string (which might contain encoding and stuff) + contentType := *dct + i := strings.IndexRune(contentType, ';') + if i == -1 { + i = len(contentType) + } + mediaType = 
strings.TrimSpace(strings.ToLower(contentType[0:i])) + } + + isJson := mediaType == "" || mediaType == ApplicationJSON || mediaType == TextJSON + + // If isJson and no encoding to base64, we don't need to perform additional steps + if isJson && !isBase64 { + stream.WriteObjectField("data") + _, err := stream.Write(in.DataEncoded) + if err != nil { + return fmt.Errorf("error while writing data: %w", err) + } + } else { + if in.Context.GetSpecVersion() == CloudEventsVersionV1 && isBase64 { + stream.WriteObjectField("data_base64") + } else { + stream.WriteObjectField("data") + } + // At this point of we need to write to base 64 string, or we just need to write the plain string + if isBase64 { + stream.WriteString(base64.StdEncoding.EncodeToString(in.DataEncoded)) + } else { + stream.WriteString(string(in.DataEncoded)) + } + } + + } + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event data: %w", stream.Error) + } + + for k, v := range ext { + stream.WriteMore() + stream.WriteObjectField(k) + stream.WriteVal(v) + } + + stream.WriteObjectEnd() + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event extensions: %w", stream.Error) + } + return stream.Flush() +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (e Event) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := WriteJson(&e, &buf) + return buf.Bytes(), err +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go new file mode 100644 index 00000000..9d1aeeb6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go @@ -0,0 +1,103 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "time" +) + +var _ EventReader = (*Event)(nil) + +// SpecVersion implements EventReader.SpecVersion +func (e Event) SpecVersion() string { + if e.Context != nil { + return e.Context.GetSpecVersion() + } + return "" +} + +// Type implements EventReader.Type +func (e Event) Type() string { + if e.Context != nil { + return e.Context.GetType() + } + return "" +} + +// Source implements EventReader.Source +func (e Event) Source() string { + if e.Context != nil { + return e.Context.GetSource() + } + return "" +} + +// Subject implements EventReader.Subject +func (e Event) Subject() string { + if e.Context != nil { + return e.Context.GetSubject() + } + return "" +} + +// ID implements EventReader.ID +func (e Event) ID() string { + if e.Context != nil { + return e.Context.GetID() + } + return "" +} + +// Time implements EventReader.Time +func (e Event) Time() time.Time { + if e.Context != nil { + return e.Context.GetTime() + } + return time.Time{} +} + +// DataSchema implements EventReader.DataSchema +func (e Event) DataSchema() string { + if e.Context != nil { + return e.Context.GetDataSchema() + } + return "" +} + +// DataContentType implements EventReader.DataContentType +func (e Event) DataContentType() string { + if e.Context != nil { + return e.Context.GetDataContentType() + } + return "" +} + +// DataMediaType returns the parsed DataMediaType of the event. If parsing +// fails, the empty string is returned. To retrieve the parsing error, use +// `Context.GetDataMediaType` instead. 
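+// For example, a datacontenttype of "application/json; charset=utf-8"
+// yields the media type "application/json".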
+func (e Event) DataMediaType() string {
+	if e.Context != nil {
+		mediaType, _ := e.Context.GetDataMediaType()
+		return mediaType
+	}
+	return ""
+}
+
+// DeprecatedDataContentEncoding implements EventReader.DeprecatedDataContentEncoding
+func (e Event) DeprecatedDataContentEncoding() string {
+	if e.Context != nil {
+		return e.Context.DeprecatedGetDataContentEncoding()
+	}
+	return ""
+}
+
+// Extensions implements EventReader.Extensions
+func (e Event) Extensions() map[string]interface{} {
+	if e.Context != nil {
+		return e.Context.GetExtensions()
+	}
+	return map[string]interface{}(nil)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go
new file mode 100644
index 00000000..0dd88ae5
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go
@@ -0,0 +1,480 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package event
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	jsoniter "github.com/json-iterator/go"
+
+	"github.com/cloudevents/sdk-go/v2/types"
+)
+
+const specVersionV03Flag uint8 = 1 << 4
+const specVersionV1Flag uint8 = 1 << 5
+const dataBase64Flag uint8 = 1 << 6
+const dataContentTypeFlag uint8 = 1 << 7
+
+func checkFlag(state uint8, flag uint8) bool {
+	return state&flag != 0
+}
+
+func appendFlag(state *uint8, flag uint8) {
+	*state = (*state) | flag
+}
+
+var iterPool = sync.Pool{
+	New: func() interface{} {
+		return jsoniter.Parse(jsoniter.ConfigFastest, nil, 1024)
+	},
+}
+
+func borrowIterator(reader io.Reader) *jsoniter.Iterator {
+	iter := iterPool.Get().(*jsoniter.Iterator)
+	iter.Reset(reader)
+	return iter
+}
+
+func returnIterator(iter *jsoniter.Iterator) {
+	iter.Error = nil
+	iter.Attachment = nil
+	iterPool.Put(iter)
+}
+
+// ReadJson parses the given reader as a JSON-encoded event into out.
+func ReadJson(out *Event, reader io.Reader) error {
+	iterator := borrowIterator(reader)
+	defer returnIterator(iterator)
+
+	return readJsonFromIterator(out, iterator)
+}
+
+// readJsonFromIterator reads an event from the given jsoniter.Iterator.
+func readJsonFromIterator(out *Event, iterator *jsoniter.Iterator) error {
+	// Parsing dependency graph:
+	//         SpecVersion
+	//          ^     ^
+	//          |     +--------------+
+	//          +                    +
+	//  All Attributes           datacontenttype (and datacontentencoding for v0.3)
+	//  (except datacontenttype)     ^
+	//                               |
+	//                               |
+	//                               +
+	//                              Data
+
+	var state uint8 = 0
+	var cachedData []byte
+
+	var (
+		// Universally parseable fields.
+		id              string
+		typ             string
+		source          types.URIRef
+		subject         *string
+		time            *types.Timestamp
+		datacontenttype *string
+		extensions      = make(map[string]interface{})
+
+		// These fields require knowledge about the specversion to be parsed.
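+		// For example, "schemaurl" is an attribute in v0.3 but an extension in
+		// v1.0, while "dataschema" is the reverse, so both are kept as raw
+		// values until the specversion is known.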
+		schemaurl           jsoniter.Any
+		datacontentencoding jsoniter.Any
+		dataschema          jsoniter.Any
+		dataBase64          jsoniter.Any
+	)
+
+	for key := iterator.ReadObject(); key != ""; key = iterator.ReadObject() {
+		// Check if we have some error in our error cache
+		if iterator.Error != nil {
+			return iterator.Error
+		}
+
+		// We have a key, now we need to figure out what to do
+		// depending on the parsing state
+
+		// If it's a specversion, trigger state change
+		if key == "specversion" {
+			if checkFlag(state, specVersionV1Flag|specVersionV03Flag) {
+				return fmt.Errorf("specversion was already provided")
+			}
+			sv := iterator.ReadString()
+
+			// Check proper specversion
+			switch sv {
+			case CloudEventsVersionV1:
+				con := &EventContextV1{
+					ID:              id,
+					Type:            typ,
+					Source:          source,
+					Subject:         subject,
+					Time:            time,
+					DataContentType: datacontenttype,
+				}
+
+				// Add the fields relevant for the version ...
+				if dataschema != nil {
+					var err error
+					con.DataSchema, err = toUriPtr(dataschema)
+					if err != nil {
+						return err
+					}
+				}
+				if dataBase64 != nil {
+					stream := jsoniter.ConfigFastest.BorrowStream(nil)
+					defer jsoniter.ConfigFastest.ReturnStream(stream)
+					dataBase64.WriteTo(stream)
+					cachedData = stream.Buffer()
+					if stream.Error != nil {
+						return stream.Error
+					}
+					appendFlag(&state, dataBase64Flag)
+				}
+
+				// ... add all remaining fields as extensions.
+				if schemaurl != nil {
+					extensions["schemaurl"] = schemaurl.GetInterface()
+				}
+				if datacontentencoding != nil {
+					extensions["datacontentencoding"] = datacontentencoding.GetInterface()
+				}
+
+				out.Context = con
+				appendFlag(&state, specVersionV1Flag)
+			case CloudEventsVersionV03:
+				con := &EventContextV03{
+					ID:              id,
+					Type:            typ,
+					Source:          source,
+					Subject:         subject,
+					Time:            time,
+					DataContentType: datacontenttype,
+				}
+				var err error
+				// Add the fields relevant for the version ...
+				if schemaurl != nil {
+					con.SchemaURL, err = toUriRefPtr(schemaurl)
+					if err != nil {
+						return err
+					}
+				}
+				if datacontentencoding != nil {
+					con.DataContentEncoding, err = toStrPtr(datacontentencoding)
+					if err != nil {
+						return err
+					}
+					if con.DataContentEncoding == nil || *con.DataContentEncoding != Base64 {
+						return ValidationError{"datacontentencoding": errors.New("invalid datacontentencoding value, the only allowed value is 'base64'")}
+					}
+					appendFlag(&state, dataBase64Flag)
+				}
+
+				// ... add all remaining fields as extensions.
+				if dataschema != nil {
+					extensions["dataschema"] = dataschema.GetInterface()
+				}
+				if dataBase64 != nil {
+					extensions["data_base64"] = dataBase64.GetInterface()
+				}
+
+				out.Context = con
+				appendFlag(&state, specVersionV03Flag)
+			default:
+				return ValidationError{"specversion": errors.New("unknown value: " + sv)}
+			}
+
+			// Apply all extensions to the context object.
+			for key, val := range extensions {
+				if err := out.Context.SetExtension(key, val); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+
+		// If no specversion ...
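+		// ... has been read yet, cache every attribute locally until it shows up.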
+ if !checkFlag(state, specVersionV03Flag|specVersionV1Flag) { + switch key { + case "id": + id = iterator.ReadString() + case "type": + typ = iterator.ReadString() + case "source": + source = readUriRef(iterator) + case "subject": + subject = readStrPtr(iterator) + case "time": + time = readTimestamp(iterator) + case "datacontenttype": + datacontenttype = readStrPtr(iterator) + appendFlag(&state, dataContentTypeFlag) + case "data": + cachedData = iterator.SkipAndReturnBytes() + case "data_base64": + dataBase64 = iterator.ReadAny() + case "dataschema": + dataschema = iterator.ReadAny() + case "schemaurl": + schemaurl = iterator.ReadAny() + case "datacontentencoding": + datacontentencoding = iterator.ReadAny() + default: + extensions[key] = iterator.Read() + } + continue + } + + // From this point downward -> we can assume the event has a context pointer non nil + + // If it's a datacontenttype, trigger state change + if key == "datacontenttype" { + if checkFlag(state, dataContentTypeFlag) { + return fmt.Errorf("datacontenttype was already provided") + } + + dct := iterator.ReadString() + + switch ctx := out.Context.(type) { + case *EventContextV03: + ctx.DataContentType = &dct + case *EventContextV1: + ctx.DataContentType = &dct + } + appendFlag(&state, dataContentTypeFlag) + continue + } + + // If it's a datacontentencoding and it's v0.3, trigger state change + if checkFlag(state, specVersionV03Flag) && key == "datacontentencoding" { + if checkFlag(state, dataBase64Flag) { + return ValidationError{"datacontentencoding": errors.New("datacontentencoding was specified twice")} + } + + dce := iterator.ReadString() + + if dce != Base64 { + return ValidationError{"datacontentencoding": errors.New("invalid datacontentencoding value, the only allowed value is 'base64'")} + } + + out.Context.(*EventContextV03).DataContentEncoding = &dce + appendFlag(&state, dataBase64Flag) + continue + } + + // We can parse all attributes, except data. 
+		// If it's data or data_base64 and we don't yet have the attributes needed to process it, then we cache it.
+		// The expanded form of this condition is:
+		// (checkFlag(state, specVersionV1Flag) && !checkFlag(state, dataContentTypeFlag) && (key == "data" || key == "data_base64")) ||
+		// (checkFlag(state, specVersionV03Flag) && !(checkFlag(state, dataContentTypeFlag) && checkFlag(state, dataBase64Flag)) && key == "data")
+		if (state&(specVersionV1Flag|dataContentTypeFlag) == specVersionV1Flag && (key == "data" || key == "data_base64")) ||
+			((state&specVersionV03Flag == specVersionV03Flag) && (state&(dataContentTypeFlag|dataBase64Flag) != (dataContentTypeFlag | dataBase64Flag)) && key == "data") {
+			if key == "data_base64" {
+				appendFlag(&state, dataBase64Flag)
+			}
+			cachedData = iterator.SkipAndReturnBytes()
+			continue
+		}
+
+		// At this point, either this value is an attribute (excluding datacontenttype and datacontentencoding), or it is data and this condition holds:
+		// (specVersionV1Flag & dataContentTypeFlag) || (specVersionV03Flag & dataContentTypeFlag & dataBase64Flag)
+		switch eventContext := out.Context.(type) {
+		case *EventContextV03:
+			switch key {
+			case "id":
+				eventContext.ID = iterator.ReadString()
+			case "type":
+				eventContext.Type = iterator.ReadString()
+			case "source":
+				eventContext.Source = readUriRef(iterator)
+			case "subject":
+				eventContext.Subject = readStrPtr(iterator)
+			case "time":
+				eventContext.Time = readTimestamp(iterator)
+			case "schemaurl":
+				eventContext.SchemaURL = readUriRefPtr(iterator)
+			case "data":
+				iterator.Error = consumeData(out, checkFlag(state, dataBase64Flag), iterator)
+			default:
+				if eventContext.Extensions == nil {
+					eventContext.Extensions = make(map[string]interface{}, 1)
+				}
+				iterator.Error = eventContext.SetExtension(key, iterator.Read())
+			}
+		case *EventContextV1:
+			switch key {
+			case "id":
+				eventContext.ID = iterator.ReadString()
+			case "type":
+				eventContext.Type = iterator.ReadString()
+			case "source":
+				eventContext.Source = readUriRef(iterator)
+			case "subject":
+				eventContext.Subject = readStrPtr(iterator)
+			case "time":
+				eventContext.Time = readTimestamp(iterator)
+			case "dataschema":
+				eventContext.DataSchema = readUriPtr(iterator)
+			case "data":
+				iterator.Error = consumeData(out, false, iterator)
+			case "data_base64":
+				iterator.Error = consumeData(out, true, iterator)
+			default:
+				if eventContext.Extensions == nil {
+					eventContext.Extensions = make(map[string]interface{}, 1)
+				}
+				iterator.Error = eventContext.SetExtension(key, iterator.Read())
+			}
+		}
+	}
+
+	if state&(specVersionV03Flag|specVersionV1Flag) == 0 {
+		return ValidationError{"specversion": errors.New("no specversion")}
+	}
+
+	if iterator.Error != nil {
+		return iterator.Error
+	}
+
+	// If there is a data token cached, we always defer its processing to the end,
+	// because neither datacontenttype nor datacontentencoding is mandatory.
+ if cachedData != nil { + return consumeDataAsBytes(out, checkFlag(state, dataBase64Flag), cachedData) + } + return nil +} + +func consumeDataAsBytes(e *Event, isBase64 bool, b []byte) error { + if isBase64 { + e.DataBase64 = true + + // Allocate payload byte buffer + base64Encoded := b[1 : len(b)-1] // remove quotes + e.DataEncoded = make([]byte, base64.StdEncoding.DecodedLen(len(base64Encoded))) + length, err := base64.StdEncoding.Decode(e.DataEncoded, base64Encoded) + if err != nil { + return err + } + e.DataEncoded = e.DataEncoded[0:length] + return nil + } + + mt, _ := e.Context.GetDataMediaType() + // Empty content type assumes json + if mt != "" && mt != ApplicationJSON && mt != TextJSON { + // If not json, then data is encoded as string + iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, b) + src := iter.ReadString() // handles escaping + e.DataEncoded = []byte(src) + if iter.Error != nil { + return fmt.Errorf("unexpected data payload for media type %q, expected a string: %w", mt, iter.Error) + } + return nil + } + + e.DataEncoded = b + return nil +} + +func consumeData(e *Event, isBase64 bool, iter *jsoniter.Iterator) error { + if isBase64 { + e.DataBase64 = true + + // Allocate payload byte buffer + base64Encoded := iter.ReadStringAsSlice() + e.DataEncoded = make([]byte, base64.StdEncoding.DecodedLen(len(base64Encoded))) + length, err := base64.StdEncoding.Decode(e.DataEncoded, base64Encoded) + if err != nil { + return err + } + e.DataEncoded = e.DataEncoded[0:length] + return nil + } + + mt, _ := e.Context.GetDataMediaType() + if mt != ApplicationJSON && mt != TextJSON { + // If not json, then data is encoded as string + src := iter.ReadString() // handles escaping + e.DataEncoded = []byte(src) + if iter.Error != nil { + return fmt.Errorf("unexpected data payload for media type %q, expected a string: %w", mt, iter.Error) + } + return nil + } + + e.DataEncoded = iter.SkipAndReturnBytes() + return nil +} + +func readUriRef(iter *jsoniter.Iterator) types.URIRef { + str := iter.ReadString() + uriRef := types.ParseURIRef(str) + if uriRef == nil { + iter.Error = fmt.Errorf("cannot parse uri ref: %v", str) + return types.URIRef{} + } + return *uriRef +} + +func readStrPtr(iter *jsoniter.Iterator) *string { + str := iter.ReadString() + if str == "" { + return nil + } + return &str +} + +func readUriRefPtr(iter *jsoniter.Iterator) *types.URIRef { + return types.ParseURIRef(iter.ReadString()) +} + +func readUriPtr(iter *jsoniter.Iterator) *types.URI { + return types.ParseURI(iter.ReadString()) +} + +func readTimestamp(iter *jsoniter.Iterator) *types.Timestamp { + t, err := types.ParseTimestamp(iter.ReadString()) + if err != nil { + iter.Error = err + } + return t +} + +func toStrPtr(val jsoniter.Any) (*string, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + if str == "" { + return nil, nil + } + return &str, nil +} + +func toUriRefPtr(val jsoniter.Any) (*types.URIRef, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + return types.ParseURIRef(str), nil +} + +func toUriPtr(val jsoniter.Any) (*types.URI, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + return types.ParseURI(str), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. 
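+// For example (illustrative):
+//
+//	e := &Event{}
+//	err := json.Unmarshal(payload, e)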
+func (e *Event) UnmarshalJSON(b []byte) error {
+	iterator := jsoniter.ConfigFastest.BorrowIterator(b)
+	defer jsoniter.ConfigFastest.ReturnIterator(iterator)
+	return readJsonFromIterator(e, iterator)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go
new file mode 100644
index 00000000..958ecc47
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go
@@ -0,0 +1,50 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package event
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ValidationError maps attribute names to the validation errors found on them.
+type ValidationError map[string]error
+
+func (e ValidationError) Error() string {
+	b := strings.Builder{}
+	for k, v := range e {
+		b.WriteString(k)
+		b.WriteString(": ")
+		b.WriteString(v.Error())
+		b.WriteRune('\n')
+	}
+	return b.String()
+}
+
+// Validate performs a spec based validation on this event.
+// Validation is dependent on the spec version specified in the event context.
+func (e Event) Validate() error {
+	if e.Context == nil {
+		return ValidationError{"specversion": fmt.Errorf("missing Event.Context")}
+	}
+
+	errs := map[string]error{}
+	if e.FieldErrors != nil {
+		for k, v := range e.FieldErrors {
+			errs[k] = v
+		}
+	}
+
+	if fieldErrors := e.Context.Validate(); fieldErrors != nil {
+		for k, v := range fieldErrors {
+			errs[k] = v
+		}
+	}
+
+	if len(errs) > 0 {
+		return ValidationError(errs)
+	}
+	return nil
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go
new file mode 100644
index 00000000..ddfb1be3
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go
@@ -0,0 +1,117 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package event
+
+import (
+	"fmt"
+	"time"
+)
+
+var _ EventWriter = (*Event)(nil)
+
+// SetSpecVersion implements EventWriter.SetSpecVersion
+func (e *Event) SetSpecVersion(v string) {
+	switch v {
+	case CloudEventsVersionV03:
+		if e.Context == nil {
+			e.Context = &EventContextV03{}
+		} else {
+			e.Context = e.Context.AsV03()
+		}
+	case CloudEventsVersionV1:
+		if e.Context == nil {
+			e.Context = &EventContextV1{}
+		} else {
+			e.Context = e.Context.AsV1()
+		}
+	default:
+		e.fieldError("specversion", fmt.Errorf("a valid spec version is required: [%s, %s]",
+			CloudEventsVersionV03, CloudEventsVersionV1))
+		return
+	}
+	e.fieldOK("specversion")
+}
+
+// SetType implements EventWriter.SetType
+func (e *Event) SetType(t string) {
+	if err := e.Context.SetType(t); err != nil {
+		e.fieldError("type", err)
+	} else {
+		e.fieldOK("type")
+	}
+}
+
+// SetSource implements EventWriter.SetSource
+func (e *Event) SetSource(s string) {
+	if err := e.Context.SetSource(s); err != nil {
+		e.fieldError("source", err)
+	} else {
+		e.fieldOK("source")
+	}
+}
+
+// SetSubject implements EventWriter.SetSubject
+func (e *Event) SetSubject(s string) {
+	if err := e.Context.SetSubject(s); err != nil {
+		e.fieldError("subject", err)
+	} else {
+		e.fieldOK("subject")
+	}
+}
+
+// SetID implements EventWriter.SetID
+func (e *Event) SetID(id string) {
+	if err := e.Context.SetID(id); err != nil {
+		e.fieldError("id", err)
+	} else {
+		e.fieldOK("id")
+	}
+}
+
+// SetTime implements EventWriter.SetTime
+func (e *Event) SetTime(t time.Time) {
+	if err := e.Context.SetTime(t); err != nil {
+		e.fieldError("time", err)
+	} else {
+		e.fieldOK("time")
+	}
+}
+
+// SetDataSchema implements EventWriter.SetDataSchema
+func (e *Event) SetDataSchema(s string) {
+	if err := e.Context.SetDataSchema(s); err != nil {
+		e.fieldError("dataschema", err)
+	} else {
+		e.fieldOK("dataschema")
+	}
+}
+
+// SetDataContentType implements EventWriter.SetDataContentType
+func (e *Event) SetDataContentType(ct string) {
+	if err := e.Context.SetDataContentType(ct); err != nil {
+		e.fieldError("datacontenttype", err)
+	} else {
+		e.fieldOK("datacontenttype")
+	}
+}
+
+// SetDataContentEncoding is deprecated. Implements EventWriter.SetDataContentEncoding.
+func (e *Event) SetDataContentEncoding(enc string) {
+	if err := e.Context.DeprecatedSetDataContentEncoding(enc); err != nil {
+		e.fieldError("datacontentencoding", err)
+	} else {
+		e.fieldOK("datacontentencoding")
+	}
+}
+
+// SetExtension implements EventWriter.SetExtension
+func (e *Event) SetExtension(name string, obj interface{}) {
+	if err := e.Context.SetExtension(name, obj); err != nil {
+		e.fieldError("extension:"+name, err)
+	} else {
+		e.fieldOK("extension:" + name)
+	}
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go
new file mode 100644
index 00000000..a39565af
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go
@@ -0,0 +1,125 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package event
+
+import "time"
+
+// EventContextReader defines the methods required to read context
+// attributes.
+type EventContextReader interface {
+	// GetSpecVersion returns the native CloudEvents Spec version of the event
+	// context.
+	GetSpecVersion() string
+	// GetType returns the CloudEvents type from the context.
+	GetType() string
+	// GetSource returns the CloudEvents source from the context.
+	GetSource() string
+	// GetSubject returns the CloudEvents subject from the context.
+	GetSubject() string
+	// GetID returns the CloudEvents ID from the context.
+	GetID() string
+	// GetTime returns the CloudEvents creation time from the context.
+	GetTime() time.Time
+	// GetDataSchema returns the CloudEvents schema URL (if any) from the
+	// context.
+	GetDataSchema() string
+	// GetDataContentType returns content type on the context.
+	GetDataContentType() string
+	// DeprecatedGetDataContentEncoding returns content encoding on the context.
+	DeprecatedGetDataContentEncoding() string
+
+	// GetDataMediaType returns the MIME media type for encoded data, which is
+	// needed by both encoding and decoding. This is a processed form of
+	// GetDataContentType and it may return an error.
+	GetDataMediaType() (string, error)
+
+	// DEPRECATED: Access extensions directly via the GetExtensions() map.
+	// For example replace this:
+	//
+	//    var i int
+	//    err := ec.ExtensionAs("foo", &i)
+	//
+	// With this:
+	//
+	//    i, err := types.ToInteger(ec.GetExtensions()["foo"])
+	//
+	ExtensionAs(string, interface{}) error
+
+	// GetExtensions returns the full extensions map.
+	//
+	// Extensions use the CloudEvents type system, details in package cloudevents/types.
+	GetExtensions() map[string]interface{}
+
+	// GetExtension returns the extension associated with the given key.
+	// The given key is case insensitive. If the extension can not be found,
+	// an error will be returned.
+	GetExtension(string) (interface{}, error)
+}
+
+// EventContextWriter defines the methods required to write context
+// attributes.
+type EventContextWriter interface {
+	// SetType sets the type of the context.
+	SetType(string) error
+	// SetSource sets the source of the context.
+	SetSource(string) error
+	// SetSubject sets the subject of the context.
+	SetSubject(string) error
+	// SetID sets the ID of the context.
+	SetID(string) error
+	// SetTime sets the time of the context.
+	SetTime(time time.Time) error
+	// SetDataSchema sets the schema url of the context.
+	SetDataSchema(string) error
+	// SetDataContentType sets the data content type of the context.
+	SetDataContentType(string) error
+	// DeprecatedSetDataContentEncoding sets the data content encoding of the context.
+	DeprecatedSetDataContentEncoding(string) error
+
+	// SetExtension sets the given interface onto the extension attributes
+	// determined by the provided name.
+	//
+	// This function fails in V1 if the name doesn't respect the regex ^[a-zA-Z0-9]+$
+	//
+	// Package ./types documents the types that are allowed as extension values.
+	SetExtension(string, interface{}) error
+}
+
+// EventContextConverter are the methods that allow for event version
+// conversion.
+type EventContextConverter interface {
+	// AsV03 provides a translation from whatever the "native" encoding of the
+	// CloudEvent was to the equivalent in v0.3 field names, moving fields to or
+	// from extensions as necessary.
+	AsV03() *EventContextV03
+
+	// AsV1 provides a translation from whatever the "native" encoding of the
+	// CloudEvent was to the equivalent in v1.0 field names, moving fields to or
+	// from extensions as necessary.
+	AsV1() *EventContextV1
+}
+
+// EventContext is the canonical interface for a CloudEvents Context.
+type EventContext interface {
+	// EventContextConverter allows for conversion between versions.
+	EventContextConverter
+
+	// EventContextReader adds methods for reading context.
+	EventContextReader
+
+	// EventContextWriter adds methods for writing to context.
+	EventContextWriter
+
+	// Validate the event based on the specifics of the CloudEvents spec version
+	// represented by this event context.
+	Validate() ValidationError
+
+	// Clone clones the event context.
+	Clone() EventContext
+
+	// String returns a pretty-printed representation of the EventContext.
+	String() string
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go
new file mode 100644
index 00000000..3f050554
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go
@@ -0,0 +1,330 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package event
+
+import (
+	"encoding/json"
+	"fmt"
+	"mime"
+	"sort"
+	"strings"
+
+	"github.com/cloudevents/sdk-go/v2/types"
+)
+
+const (
+	// CloudEventsVersionV03 represents the version 0.3 of the CloudEvents spec.
+	CloudEventsVersionV03 = "0.3"
+)
+
+var specV03Attributes = map[string]struct{}{
+	"type":                {},
+	"source":              {},
+	"subject":             {},
+	"id":                  {},
+	"time":                {},
+	"schemaurl":           {},
+	"datacontenttype":     {},
+	"datacontentencoding": {},
+}
+
+// EventContextV03 represents the non-data attributes of a CloudEvents v0.3
+// event.
+type EventContextV03 struct {
+	// Type - The type of the occurrence which has happened.
+	Type string `json:"type"`
+	// Source - A URI describing the event producer.
+	Source types.URIRef `json:"source"`
+	// Subject - The subject of the event in the context of the event producer
+	// (identified by `source`).
+	Subject *string `json:"subject,omitempty"`
+	// ID of the event; must be non-empty and unique within the scope of the producer.
+	ID string `json:"id"`
+	// Time - A Timestamp when the event happened.
+	Time *types.Timestamp `json:"time,omitempty"`
+	// SchemaURL - A link to the schema that the `data` attribute adheres to.
+	SchemaURL *types.URIRef `json:"schemaurl,omitempty"`
+	// DataContentType - A MIME (RFC2046) string describing the media type of `data`.
+	DataContentType *string `json:"datacontenttype,omitempty"`
+	// DataContentEncoding (deprecated) describes the content encoding for the `data` attribute. Valid: nil, `Base64`.
+	DataContentEncoding *string `json:"datacontentencoding,omitempty"`
+	// Extensions - Additional extension metadata beyond the base spec.
+	Extensions map[string]interface{} `json:"-"`
+}
+
+// Adhere to EventContext
+var _ EventContext = (*EventContextV03)(nil)
+
+// ExtensionAs implements EventContext.ExtensionAs
+func (ec EventContextV03) ExtensionAs(name string, obj interface{}) error {
+	value, ok := ec.Extensions[name]
+	if !ok {
+		return fmt.Errorf("extension %q does not exist", name)
+	}
+
+	// Try to unmarshal extension if we find it as a RawMessage.
+	switch v := value.(type) {
+	case json.RawMessage:
+		if err := json.Unmarshal(v, obj); err == nil {
+			// if that worked, return with obj set.
+			return nil
+		}
+	}
+	// else try as a string ptr.
+
+	// Only support *string for now.
+	switch v := obj.(type) {
+	case *string:
+		if valueAsString, ok := value.(string); ok {
+			*v = valueAsString
+			return nil
+		} else {
+			return fmt.Errorf("invalid type for extension %q", name)
+		}
+	default:
+		return fmt.Errorf("unknown extension type %T", obj)
+	}
+}
+
+// SetExtension adds the extension 'name' with value 'value' to the CloudEvents
+// context. This function fails if the name uses a reserved event context key.
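+// For example (illustrative):
+//
+//	err := ec.SetExtension("traceid", "8a3c60f7")
+//	err = ec.SetExtension("type", "t") // fails: reserved spec attribute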
+func (ec *EventContextV03) SetExtension(name string, value interface{}) error { + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + + if _, ok := specV03Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + + if value == nil { + delete(ec.Extensions, name) + if len(ec.Extensions) == 0 { + ec.Extensions = nil + } + return nil + } else { + v, err := types.Validate(value) + if err == nil { + ec.Extensions[name] = v + } + return err + } +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV03) Clone() EventContext { + ec03 := ec.AsV03() + ec03.Source = types.Clone(ec.Source).(types.URIRef) + if ec.Time != nil { + ec03.Time = types.Clone(ec.Time).(*types.Timestamp) + } + if ec.SchemaURL != nil { + ec03.SchemaURL = types.Clone(ec.SchemaURL).(*types.URIRef) + } + ec03.Extensions = ec.cloneExtensions() + return ec03 +} + +func (ec *EventContextV03) cloneExtensions() map[string]interface{} { + old := ec.Extensions + if old == nil { + return nil + } + new := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range old { + new[k] = types.Clone(v) + } + return new +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV03) AsV03() *EventContextV03 { + return &ec +} + +// AsV1 implements EventContextConverter.AsV1 +func (ec EventContextV03) AsV1() *EventContextV1 { + ret := EventContextV1{ + ID: ec.ID, + Time: ec.Time, + Type: ec.Type, + DataContentType: ec.DataContentType, + Source: types.URIRef{URL: ec.Source.URL}, + Subject: ec.Subject, + Extensions: make(map[string]interface{}), + } + if ec.SchemaURL != nil { + ret.DataSchema = &types.URI{URL: ec.SchemaURL.URL} + } + + // DataContentEncoding was removed in 1.0, so put it in an extension for 1.0. + if ec.DataContentEncoding != nil { + _ = ret.SetExtension(DataContentEncodingKey, *ec.DataContentEncoding) + } + + if ec.Extensions != nil { + for k, v := range ec.Extensions { + k = strings.ToLower(k) + ret.Extensions[k] = v + } + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see +// https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md +// As of Feb 26, 2019, commit 17c32ea26baf7714ad027d9917d03d2fff79fc7e +// + https://github.com/cloudevents/spec/pull/387 -> datacontentencoding +// + https://github.com/cloudevents/spec/pull/406 -> subject +func (ec EventContextV03) Validate() ValidationError { + errors := map[string]error{} + + // type + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. 
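+	//  (for example, "com.example.object.created")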
+ eventType := strings.TrimSpace(ec.Type) + if eventType == "" { + errors["type"] = fmt.Errorf("MUST be a non-empty string") + } + + // source + // Type: URI-reference + // Constraints: + // REQUIRED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors["source"] = fmt.Errorf("REQUIRED") + } + + // subject + // Type: String + // Constraints: + // OPTIONAL + // MUST be a non-empty string + if ec.Subject != nil { + subject := strings.TrimSpace(*ec.Subject) + if subject == "" { + errors["subject"] = fmt.Errorf("if present, MUST be a non-empty string") + } + } + + // id + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the producer + id := strings.TrimSpace(ec.ID) + if id == "" { + errors["id"] = fmt.Errorf("MUST be a non-empty string") + + // no way to test "MUST be unique within the scope of the producer" + } + + // time + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the time without it being valid. + + // schemaurl + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.SchemaURL != nil { + schemaURL := strings.TrimSpace(ec.SchemaURL.String()) + // empty string is not RFC 3986 compatible. + if schemaURL == "" { + errors["schemaurl"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 3986") + } + } + + // datacontenttype + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.DataContentType != nil { + dataContentType := strings.TrimSpace(*ec.DataContentType) + if dataContentType == "" { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } else { + _, _, err := mime.ParseMediaType(dataContentType) + if err != nil { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } + } + } + + // datacontentencoding + // Type: String per RFC 2045 Section 6.1 + // Constraints: + // The attribute MUST be set if the data attribute contains string-encoded binary data. + // Otherwise the attribute MUST NOT be set. + // If present, MUST adhere to RFC 2045 Section 6.1 + if ec.DataContentEncoding != nil { + dataContentEncoding := strings.ToLower(strings.TrimSpace(*ec.DataContentEncoding)) + if dataContentEncoding != Base64 { + errors["datacontentencoding"] = fmt.Errorf("if present, MUST adhere to RFC 2045 Section 6.1") + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. 
+func (ec EventContextV03) String() string { + b := strings.Builder{} + + b.WriteString("Context Attributes,\n") + + b.WriteString(" specversion: " + CloudEventsVersionV03 + "\n") + b.WriteString(" type: " + ec.Type + "\n") + b.WriteString(" source: " + ec.Source.String() + "\n") + if ec.Subject != nil { + b.WriteString(" subject: " + *ec.Subject + "\n") + } + b.WriteString(" id: " + ec.ID + "\n") + if ec.Time != nil { + b.WriteString(" time: " + ec.Time.String() + "\n") + } + if ec.SchemaURL != nil { + b.WriteString(" schemaurl: " + ec.SchemaURL.String() + "\n") + } + if ec.DataContentType != nil { + b.WriteString(" datacontenttype: " + *ec.DataContentType + "\n") + } + if ec.DataContentEncoding != nil { + b.WriteString(" datacontentencoding: " + *ec.DataContentEncoding + "\n") + } + + if ec.Extensions != nil && len(ec.Extensions) > 0 { + b.WriteString("Extensions,\n") + keys := make([]string, 0, len(ec.Extensions)) + for k := range ec.Extensions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) + } + } + + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go new file mode 100644 index 00000000..2cd27a70 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go @@ -0,0 +1,99 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "strings" + "time" +) + +// GetSpecVersion implements EventContextReader.GetSpecVersion +func (ec EventContextV03) GetSpecVersion() string { + return CloudEventsVersionV03 +} + +// GetDataContentType implements EventContextReader.GetDataContentType +func (ec EventContextV03) GetDataContentType() string { + if ec.DataContentType != nil { + return *ec.DataContentType + } + return "" +} + +// GetDataMediaType implements EventContextReader.GetDataMediaType +func (ec EventContextV03) GetDataMediaType() (string, error) { + if ec.DataContentType != nil { + dct := *ec.DataContentType + i := strings.IndexRune(dct, ';') + if i == -1 { + return dct, nil + } + return strings.TrimSpace(dct[0:i]), nil + } + return "", nil +} + +// GetType implements EventContextReader.GetType +func (ec EventContextV03) GetType() string { + return ec.Type +} + +// GetSource implements EventContextReader.GetSource +func (ec EventContextV03) GetSource() string { + return ec.Source.String() +} + +// GetSubject implements EventContextReader.GetSubject +func (ec EventContextV03) GetSubject() string { + if ec.Subject != nil { + return *ec.Subject + } + return "" +} + +// GetTime implements EventContextReader.GetTime +func (ec EventContextV03) GetTime() time.Time { + if ec.Time != nil { + return ec.Time.Time + } + return time.Time{} +} + +// GetID implements EventContextReader.GetID +func (ec EventContextV03) GetID() string { + return ec.ID +} + +// GetDataSchema implements EventContextReader.GetDataSchema +func (ec EventContextV03) GetDataSchema() string { + if ec.SchemaURL != nil { + return ec.SchemaURL.String() + } + return "" +} + +// DeprecatedGetDataContentEncoding implements EventContextReader.DeprecatedGetDataContentEncoding +func (ec EventContextV03) DeprecatedGetDataContentEncoding() string { + if ec.DataContentEncoding != nil { + return *ec.DataContentEncoding + } + return "" +} + +// GetExtensions implements EventContextReader.GetExtensions +func (ec 
EventContextV03) GetExtensions() map[string]interface{} { + return ec.Extensions +} + +// GetExtension implements EventContextReader.GetExtension +func (ec EventContextV03) GetExtension(key string) (interface{}, error) { + v, ok := caseInsensitiveSearch(key, ec.Extensions) + if !ok { + return "", fmt.Errorf("%q not found", key) + } + return v, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go new file mode 100644 index 00000000..5d664635 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go @@ -0,0 +1,103 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "net/url" + "strings" + "time" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Adhere to EventContextWriter +var _ EventContextWriter = (*EventContextV03)(nil) + +// SetDataContentType implements EventContextWriter.SetDataContentType +func (ec *EventContextV03) SetDataContentType(ct string) error { + ct = strings.TrimSpace(ct) + if ct == "" { + ec.DataContentType = nil + } else { + ec.DataContentType = &ct + } + return nil +} + +// SetType implements EventContextWriter.SetType +func (ec *EventContextV03) SetType(t string) error { + t = strings.TrimSpace(t) + ec.Type = t + return nil +} + +// SetSource implements EventContextWriter.SetSource +func (ec *EventContextV03) SetSource(u string) error { + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.Source = types.URIRef{URL: *pu} + return nil +} + +// SetSubject implements EventContextWriter.SetSubject +func (ec *EventContextV03) SetSubject(s string) error { + s = strings.TrimSpace(s) + if s == "" { + ec.Subject = nil + } else { + ec.Subject = &s + } + return nil +} + +// SetID implements EventContextWriter.SetID +func (ec *EventContextV03) SetID(id string) error { + id = strings.TrimSpace(id) + if id == "" { + return errors.New("id is required to be a non-empty string") + } + ec.ID = id + return nil +} + +// SetTime implements EventContextWriter.SetTime +func (ec *EventContextV03) SetTime(t time.Time) error { + if t.IsZero() { + ec.Time = nil + } else { + ec.Time = &types.Timestamp{Time: t} + } + return nil +} + +// SetDataSchema implements EventContextWriter.SetDataSchema +func (ec *EventContextV03) SetDataSchema(u string) error { + u = strings.TrimSpace(u) + if u == "" { + ec.SchemaURL = nil + return nil + } + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.SchemaURL = &types.URIRef{URL: *pu} + return nil +} + +// DeprecatedSetDataContentEncoding implements EventContextWriter.DeprecatedSetDataContentEncoding +func (ec *EventContextV03) DeprecatedSetDataContentEncoding(e string) error { + e = strings.ToLower(strings.TrimSpace(e)) + if e == "" { + ec.DataContentEncoding = nil + } else { + ec.DataContentEncoding = &e + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go new file mode 100644 index 00000000..8f164502 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go @@ -0,0 +1,315 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "mime" + "sort" + "strings" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// WIP: AS OF SEP 20, 2019 + +const ( + // CloudEventsVersionV1 represents the version 1.0 of the 
CloudEvents spec. + CloudEventsVersionV1 = "1.0" +) + +var specV1Attributes = map[string]struct{}{ + "id": {}, + "source": {}, + "type": {}, + "datacontenttype": {}, + "subject": {}, + "time": {}, + "specversion": {}, + "dataschema": {}, +} + +// EventContextV1 represents the non-data attributes of a CloudEvents v1.0 +// event. +type EventContextV1 struct { + // ID of the event; must be non-empty and unique within the scope of the producer. + // +required + ID string `json:"id"` + // Source - A URI describing the event producer. + // +required + Source types.URIRef `json:"source"` + // Type - The type of the occurrence which has happened. + // +required + Type string `json:"type"` + + // DataContentType - A MIME (RFC2046) string describing the media type of `data`. + // +optional + DataContentType *string `json:"datacontenttype,omitempty"` + // Subject - The subject of the event in the context of the event producer + // (identified by `source`). + // +optional + Subject *string `json:"subject,omitempty"` + // Time - A Timestamp when the event happened. + // +optional + Time *types.Timestamp `json:"time,omitempty"` + // DataSchema - A link to the schema that the `data` attribute adheres to. + // +optional + DataSchema *types.URI `json:"dataschema,omitempty"` + + // Extensions - Additional extension metadata beyond the base spec. + // +optional + Extensions map[string]interface{} `json:"-"` +} + +// Adhere to EventContext +var _ EventContext = (*EventContextV1)(nil) + +// ExtensionAs implements EventContext.ExtensionAs +func (ec EventContextV1) ExtensionAs(name string, obj interface{}) error { + name = strings.ToLower(name) + value, ok := ec.Extensions[name] + if !ok { + return fmt.Errorf("extension %q does not exist", name) + } + + // Only support *string for now. + if v, ok := obj.(*string); ok { + if *v, ok = value.(string); ok { + return nil + } + } + return fmt.Errorf("unknown extension type %T", obj) +} + +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents +// context. This function fails if the name doesn't respect the regex +// ^[a-zA-Z0-9]+$ or if the name uses a reserved event context key. 
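+//
+// A minimal usage sketch (hedged; the variable names are illustrative and
+// not part of this file):
+//
+//	ec := &EventContextV1{}
+//	if err := ec.SetExtension("traceid", "abc123"); err != nil {
+//		// the name failed validation or collided with a spec attribute
+//	}
+//	_ = ec.SetExtension("traceid", nil) // a nil value deletes the extension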
+func (ec *EventContextV1) SetExtension(name string, value interface{}) error { + if err := validateExtensionName(name); err != nil { + return err + } + + if _, ok := specV1Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + + name = strings.ToLower(name) + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + if value == nil { + delete(ec.Extensions, name) + if len(ec.Extensions) == 0 { + ec.Extensions = nil + } + return nil + } else { + v, err := types.Validate(value) // Ensure it's a legal CE attribute value + if err == nil { + ec.Extensions[name] = v + } + return err + } +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV1) Clone() EventContext { + ec1 := ec.AsV1() + ec1.Source = types.Clone(ec.Source).(types.URIRef) + if ec.Time != nil { + ec1.Time = types.Clone(ec.Time).(*types.Timestamp) + } + if ec.DataSchema != nil { + ec1.DataSchema = types.Clone(ec.DataSchema).(*types.URI) + } + ec1.Extensions = ec.cloneExtensions() + return ec1 +} + +func (ec *EventContextV1) cloneExtensions() map[string]interface{} { + old := ec.Extensions + if old == nil { + return nil + } + new := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range old { + new[k] = types.Clone(v) + } + return new +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV1) AsV03() *EventContextV03 { + ret := EventContextV03{ + ID: ec.ID, + Time: ec.Time, + Type: ec.Type, + DataContentType: ec.DataContentType, + Source: types.URIRef{URL: ec.Source.URL}, + Subject: ec.Subject, + Extensions: make(map[string]interface{}), + } + + if ec.DataSchema != nil { + ret.SchemaURL = &types.URIRef{URL: ec.DataSchema.URL} + } + + if ec.Extensions != nil { + for k, v := range ec.Extensions { + k = strings.ToLower(k) + // DeprecatedDataContentEncoding was introduced in 0.3, removed in 1.0 + if strings.EqualFold(k, DataContentEncodingKey) { + etv, ok := v.(string) + if ok && etv != "" { + ret.DataContentEncoding = &etv + } + continue + } + ret.Extensions[k] = v + } + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// AsV1 implements EventContextConverter.AsV1 +func (ec EventContextV1) AsV1() *EventContextV1 { + return &ec +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see https://github.com/cloudevents/spec/blob/v1.0/spec.md. +func (ec EventContextV1) Validate() ValidationError { + errors := map[string]error{} + + // id + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the producer + id := strings.TrimSpace(ec.ID) + if id == "" { + errors["id"] = fmt.Errorf("MUST be a non-empty string") + // no way to test "MUST be unique within the scope of the producer" + } + + // source + // Type: URI-reference + // Constraints: + // REQUIRED + // MUST be a non-empty URI-reference + // An absolute URI is RECOMMENDED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors["source"] = fmt.Errorf("REQUIRED") + } + + // type + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. 
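+	// (For instance, "com.github.pull_request.opened" follows this convention.)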
+ eventType := strings.TrimSpace(ec.Type) + if eventType == "" { + errors["type"] = fmt.Errorf("MUST be a non-empty string") + } + + // The following attributes are optional but still have validation. + + // datacontenttype + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.DataContentType != nil { + dataContentType := strings.TrimSpace(*ec.DataContentType) + if dataContentType == "" { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } else { + _, _, err := mime.ParseMediaType(dataContentType) + if err != nil { + errors["datacontenttype"] = fmt.Errorf("failed to parse RFC 2046 media type %w", err) + } + } + } + + // dataschema + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.DataSchema != nil { + if !ec.DataSchema.Validate() { + errors["dataschema"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 3986, Section 4.3. Absolute URI") + } + } + + // subject + // Type: String + // Constraints: + // OPTIONAL + // MUST be a non-empty string + if ec.Subject != nil { + subject := strings.TrimSpace(*ec.Subject) + if subject == "" { + errors["subject"] = fmt.Errorf("if present, MUST be a non-empty string") + } + } + + // time + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the time without it being valid. + + if len(errors) > 0 { + return errors + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. +func (ec EventContextV1) String() string { + b := strings.Builder{} + + b.WriteString("Context Attributes,\n") + + b.WriteString(" specversion: " + CloudEventsVersionV1 + "\n") + b.WriteString(" type: " + ec.Type + "\n") + b.WriteString(" source: " + ec.Source.String() + "\n") + if ec.Subject != nil { + b.WriteString(" subject: " + *ec.Subject + "\n") + } + b.WriteString(" id: " + ec.ID + "\n") + if ec.Time != nil { + b.WriteString(" time: " + ec.Time.String() + "\n") + } + if ec.DataSchema != nil { + b.WriteString(" dataschema: " + ec.DataSchema.String() + "\n") + } + if ec.DataContentType != nil { + b.WriteString(" datacontenttype: " + *ec.DataContentType + "\n") + } + + if ec.Extensions != nil && len(ec.Extensions) > 0 { + b.WriteString("Extensions,\n") + keys := make([]string, 0, len(ec.Extensions)) + for k := range ec.Extensions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) + } + } + + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go new file mode 100644 index 00000000..74f73b02 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go @@ -0,0 +1,104 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "strings" + "time" +) + +// GetSpecVersion implements EventContextReader.GetSpecVersion +func (ec EventContextV1) GetSpecVersion() string { + return CloudEventsVersionV1 +} + +// GetDataContentType implements EventContextReader.GetDataContentType +func (ec EventContextV1) GetDataContentType() string { + if ec.DataContentType != nil { + return *ec.DataContentType + } + return "" +} + 
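+// Illustrative only (assumed values, not part of the vendored file):
+// GetDataContentType returns the full header value, while GetDataMediaType
+// below strips any media type parameters:
+//
+//	ct := "application/json; charset=utf-8"
+//	ec := EventContextV1{DataContentType: &ct}
+//	_ = ec.GetDataContentType()    // "application/json; charset=utf-8"
+//	mt, _ := ec.GetDataMediaType() // "application/json"
+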
+// GetDataMediaType implements EventContextReader.GetDataMediaType +func (ec EventContextV1) GetDataMediaType() (string, error) { + if ec.DataContentType != nil { + dct := *ec.DataContentType + i := strings.IndexRune(dct, ';') + if i == -1 { + return dct, nil + } + return strings.TrimSpace(dct[0:i]), nil + } + return "", nil +} + +// GetType implements EventContextReader.GetType +func (ec EventContextV1) GetType() string { + return ec.Type +} + +// GetSource implements EventContextReader.GetSource +func (ec EventContextV1) GetSource() string { + return ec.Source.String() +} + +// GetSubject implements EventContextReader.GetSubject +func (ec EventContextV1) GetSubject() string { + if ec.Subject != nil { + return *ec.Subject + } + return "" +} + +// GetTime implements EventContextReader.GetTime +func (ec EventContextV1) GetTime() time.Time { + if ec.Time != nil { + return ec.Time.Time + } + return time.Time{} +} + +// GetID implements EventContextReader.GetID +func (ec EventContextV1) GetID() string { + return ec.ID +} + +// GetDataSchema implements EventContextReader.GetDataSchema +func (ec EventContextV1) GetDataSchema() string { + if ec.DataSchema != nil { + return ec.DataSchema.String() + } + return "" +} + +// DeprecatedGetDataContentEncoding implements EventContextReader.DeprecatedGetDataContentEncoding +func (ec EventContextV1) DeprecatedGetDataContentEncoding() string { + return "" +} + +// GetExtensions implements EventContextReader.GetExtensions +func (ec EventContextV1) GetExtensions() map[string]interface{} { + if len(ec.Extensions) == 0 { + return nil + } + // For now, convert the extensions of v1.0 to the pre-v1.0 style. + ext := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range ec.Extensions { + ext[k] = v + } + return ext +} + +// GetExtension implements EventContextReader.GetExtension +func (ec EventContextV1) GetExtension(key string) (interface{}, error) { + v, ok := caseInsensitiveSearch(key, ec.Extensions) + if !ok { + return "", fmt.Errorf("%q not found", key) + } + return v, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go new file mode 100644 index 00000000..5f2aca76 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go @@ -0,0 +1,97 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "net/url" + "strings" + "time" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Adhere to EventContextWriter +var _ EventContextWriter = (*EventContextV1)(nil) + +// SetDataContentType implements EventContextWriter.SetDataContentType +func (ec *EventContextV1) SetDataContentType(ct string) error { + ct = strings.TrimSpace(ct) + if ct == "" { + ec.DataContentType = nil + } else { + ec.DataContentType = &ct + } + return nil +} + +// SetType implements EventContextWriter.SetType +func (ec *EventContextV1) SetType(t string) error { + t = strings.TrimSpace(t) + ec.Type = t + return nil +} + +// SetSource implements EventContextWriter.SetSource +func (ec *EventContextV1) SetSource(u string) error { + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.Source = types.URIRef{URL: *pu} + return nil +} + +// SetSubject implements EventContextWriter.SetSubject +func (ec *EventContextV1) SetSubject(s string) error { + s = strings.TrimSpace(s) + if s == "" { + ec.Subject = nil + } else { + ec.Subject = &s + } + return nil +} + +// 
SetID implements EventContextWriter.SetID +func (ec *EventContextV1) SetID(id string) error { + id = strings.TrimSpace(id) + if id == "" { + return errors.New("id is required to be a non-empty string") + } + ec.ID = id + return nil +} + +// SetTime implements EventContextWriter.SetTime +func (ec *EventContextV1) SetTime(t time.Time) error { + if t.IsZero() { + ec.Time = nil + } else { + ec.Time = &types.Timestamp{Time: t} + } + return nil +} + +// SetDataSchema implements EventContextWriter.SetDataSchema +func (ec *EventContextV1) SetDataSchema(u string) error { + u = strings.TrimSpace(u) + if u == "" { + ec.DataSchema = nil + return nil + } + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.DataSchema = &types.URI{URL: *pu} + return nil +} + +// DeprecatedSetDataContentEncoding implements EventContextWriter.DeprecatedSetDataContentEncoding +func (ec *EventContextV1) DeprecatedSetDataContentEncoding(e string) error { + return errors.New("deprecated: SetDataContentEncoding is not supported in v1.0 of CloudEvents") +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go new file mode 100644 index 00000000..72d0e757 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go @@ -0,0 +1,57 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "fmt" + "strings" +) + +const ( + // DataContentEncodingKey is the key to DeprecatedDataContentEncoding for versions that do not support data content encoding + // directly. + DataContentEncodingKey = "datacontentencoding" +) + +var ( + // This determines the behavior of validateExtensionName(). For MaxExtensionNameLength > 0, an error will be returned, + // if len(key) > MaxExtensionNameLength + MaxExtensionNameLength = 0 +) + +func caseInsensitiveSearch(key string, space map[string]interface{}) (interface{}, bool) { + lkey := strings.ToLower(key) + for k, v := range space { + if strings.EqualFold(lkey, strings.ToLower(k)) { + return v, true + } + } + return nil, false +} + +func IsExtensionNameValid(key string) bool { + if err := validateExtensionName(key); err != nil { + return false + } + return true +} + +func validateExtensionName(key string) error { + if len(key) < 1 { + return errors.New("bad key, CloudEvents attribute names MUST NOT be empty") + } + if MaxExtensionNameLength > 0 && len(key) > MaxExtensionNameLength { + return fmt.Errorf("bad key, CloudEvents attribute name '%s' is longer than %d characters", key, MaxExtensionNameLength) + } + + for _, c := range key { + if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) { + return errors.New("bad key, CloudEvents attribute names MUST consist of lower-case letters ('a' to 'z'), upper-case letters ('A' to 'Z') or digits ('0' to '9') from the ASCII character set") + } + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go new file mode 100644 index 00000000..3c771fc5 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go @@ -0,0 +1,25 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package protocol defines interfaces to decouple the client package +from protocol implementations. + +Most event sender and receiver applications should not use this +package, they should use the client package. 
This package is for +infrastructure developers implementing new transports, or intermediary +components like importers, channels or brokers. + +Available protocols: + +* HTTP (using net/http) +* Kafka (using github.com/Shopify/sarama) +* AMQP (using pack.ag/amqp) +* Go Channels +* Nats +* Nats Streaming (stan) +* Google PubSub +*/ +package protocol diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go new file mode 100644 index 00000000..a3f33526 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import "fmt" + +// ErrTransportMessageConversion is an error produced when the transport +// message can not be converted. +type ErrTransportMessageConversion struct { + fatal bool + handled bool + transport string + message string +} + +// NewErrTransportMessageConversion makes a new ErrTransportMessageConversion. +func NewErrTransportMessageConversion(transport, message string, handled, fatal bool) *ErrTransportMessageConversion { + return &ErrTransportMessageConversion{ + transport: transport, + message: message, + handled: handled, + fatal: fatal, + } +} + +// IsFatal reports if this error should be considered fatal. +func (e *ErrTransportMessageConversion) IsFatal() bool { + return e.fatal +} + +// Handled reports if this error should be considered accepted and no further action. +func (e *ErrTransportMessageConversion) Handled() bool { + return e.handled +} + +// Error implements error.Error +func (e *ErrTransportMessageConversion) Error() string { + return fmt.Sprintf("transport %s failed to convert message: %s", e.transport, e.message) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go new file mode 100644 index 00000000..48f03fb6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go @@ -0,0 +1,128 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "go.uber.org/zap" + "net/http" + "strconv" + "strings" + "time" +) + +type WebhookConfig struct { + AllowedMethods []string // defaults to POST + AllowedRate *int + AutoACKCallback bool + AllowedOrigins []string +} + +const ( + DefaultAllowedRate = 1000 + DefaultTimeout = time.Second * 600 +) + +// TODO: implement rate limiting. +// Throttling is indicated by requests being rejected using HTTP status code 429 Too Many Requests. +// TODO: use this if Webhook Request Origin has been turned on. +// Inbound requests should be rejected if Allowed Origins is required by SDK. + +func (p *Protocol) OptionsHandler(rw http.ResponseWriter, req *http.Request) { + if req.Method != http.MethodOptions || p.WebhookConfig == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + + headers := make(http.Header) + + // The spec does not say we need to validate the origin, just the request origin. + // After the handshake, we will validate the origin. 
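+	// (Illustrative: per the abuse-protection handshake, the caller sends
+	// WebHook-Request-Origin, answered below with WebHook-Allowed-Origin,
+	// and optionally WebHook-Request-Rate, answered with WebHook-Allowed-Rate.)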
+ if origin, ok := p.ValidateRequestOrigin(req); !ok { + rw.WriteHeader(http.StatusBadRequest) + return + } else { + headers.Set("WebHook-Allowed-Origin", origin) + } + + allowedRateRequired := false + if _, ok := req.Header[http.CanonicalHeaderKey("WebHook-Request-Rate")]; ok { + // must send WebHook-Allowed-Rate + allowedRateRequired = true + } + + if p.WebhookConfig.AllowedRate != nil { + headers.Set("WebHook-Allowed-Rate", strconv.Itoa(*p.WebhookConfig.AllowedRate)) + } else if allowedRateRequired { + headers.Set("WebHook-Allowed-Rate", strconv.Itoa(DefaultAllowedRate)) + } + + if len(p.WebhookConfig.AllowedMethods) > 0 { + headers.Set("Allow", strings.Join(p.WebhookConfig.AllowedMethods, ", ")) + } else { + headers.Set("Allow", http.MethodPost) + } + + cb := req.Header.Get("WebHook-Request-Callback") + if cb != "" { + if p.WebhookConfig.AutoACKCallback { + go func() { + reqAck, err := http.NewRequest(http.MethodPost, cb, nil) + if err != nil { + cecontext.LoggerFrom(req.Context()).Errorw("OPTIONS handler failed to create http request attempting to ack callback.", zap.Error(err), zap.String("callback", cb)) + return + } + + // Write out the headers. + for k := range headers { + reqAck.Header.Set(k, headers.Get(k)) + } + + _, err = http.DefaultClient.Do(reqAck) + if err != nil { + cecontext.LoggerFrom(req.Context()).Errorw("OPTIONS handler failed to ack callback.", zap.Error(err), zap.String("callback", cb)) + return + } + }() + return + } else { + cecontext.LoggerFrom(req.Context()).Infof("ACTION REQUIRED: Please validate web hook request callback: %q", cb) + // TODO: what to do pending https://github.com/cloudevents/spec/issues/617 + return + } + } + + // Write out the headers. + for k := range headers { + rw.Header().Set(k, headers.Get(k)) + } +} + +func (p *Protocol) ValidateRequestOrigin(req *http.Request) (string, bool) { + return p.validateOrigin(req.Header.Get("WebHook-Request-Origin")) +} + +func (p *Protocol) ValidateOrigin(req *http.Request) (string, bool) { + return p.validateOrigin(req.Header.Get("Origin")) +} + +func (p *Protocol) validateOrigin(ro string) (string, bool) { + cecontext.LoggerFrom(context.TODO()).Infow("Validating origin.", zap.String("origin", ro)) + + for _, ao := range p.WebhookConfig.AllowedOrigins { + if ao == "*" { + return ao, true + } + // TODO: it is not clear what the rules for allowed hosts are. + // Need to find docs for this. For now, test for prefix. + if strings.HasPrefix(ro, ao) { + return ao, true + } + } + + return ro, false +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go new file mode 100644 index 00000000..e973738c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go @@ -0,0 +1,48 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + + nethttp "net/http" + "net/url" +) + +type requestKey struct{} + +// RequestData holds the http.Request information subset that can be +// used to retrieve HTTP information for an incoming CloudEvent. +type RequestData struct { + URL *url.URL + Header nethttp.Header + RemoteAddr string + Host string +} + +// WithRequestDataAtContext uses the http.Request to add RequestData +// information to the Context. 
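+//
+// A hedged sketch of the round trip (req is an incoming *http.Request):
+//
+//	ctx := WithRequestDataAtContext(req.Context(), req)
+//	if rd := RequestDataFromContext(ctx); rd != nil {
+//		_ = rd.Host // Host of the original request
+//	}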
+func WithRequestDataAtContext(ctx context.Context, r *nethttp.Request) context.Context { + if r == nil { + return ctx + } + + return context.WithValue(ctx, requestKey{}, &RequestData{ + URL: r.URL, + Header: r.Header, + RemoteAddr: r.RemoteAddr, + Host: r.Host, + }) +} + +// RequestDataFromContext retrieves RequestData from the Context. +// If not set nil is returned. +func RequestDataFromContext(ctx context.Context) *RequestData { + if req := ctx.Value(requestKey{}); req != nil { + return req.(*RequestData) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go new file mode 100644 index 00000000..3428ea38 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package http implements an HTTP binding using net/http module +*/ +package http diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go new file mode 100644 index 00000000..055a5c4d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go @@ -0,0 +1,55 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "github.com/cloudevents/sdk-go/v2/binding" + "net/http" + "net/textproto" + "strings" + "unicode" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +var attributeHeadersMapping map[string]string + +type customHeaderKey int + +const ( + headerKey customHeaderKey = iota +) + +func init() { + attributeHeadersMapping = make(map[string]string) + for _, v := range specs.Versions() { + for _, a := range v.Attributes() { + if a.Kind() == spec.DataContentType { + attributeHeadersMapping[a.Name()] = ContentType + } else { + attributeHeadersMapping[a.Name()] = textproto.CanonicalMIMEHeaderKey(prefix + a.Name()) + } + } + } +} + +func extNameToHeaderName(name string) string { + var b strings.Builder + b.Grow(len(name) + len(prefix)) + b.WriteString(prefix) + b.WriteRune(unicode.ToUpper(rune(name[0]))) + b.WriteString(name[1:]) + return b.String() +} + +func HeaderFrom(ctx context.Context) http.Header { + return binding.GetOrDefaultFromCtx(ctx, headerKey, make(http.Header)).(http.Header) +} + +func WithCustomHeader(ctx context.Context, header http.Header) context.Context { + return context.WithValue(ctx, headerKey, header) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go new file mode 100644 index 00000000..7a7c36f9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go @@ -0,0 +1,175 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "io" + nethttp "net/http" + "net/textproto" + "strings" + "unicode" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +const prefix = "Ce-" + +var specs = spec.WithPrefixMatchExact( + func(s string) string { + if s == "datacontenttype" { + return "Content-Type" + } else { + return textproto.CanonicalMIMEHeaderKey("Ce-" + s) + } + }, + "Ce-", +) + +const ContentType = "Content-Type" +const ContentLength = "Content-Length" + +// Message holds the Header and Body of a HTTP 
Request or Response. +// The Message instance *must* be constructed from NewMessage function. +// This message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +type Message struct { + Header nethttp.Header + BodyReader io.ReadCloser + OnFinish func(error) error + + ctx context.Context + + format format.Format + version spec.Version +} + +// Check if http.Message implements binding.Message +var _ binding.Message = (*Message)(nil) +var _ binding.MessageContext = (*Message)(nil) +var _ binding.MessageMetadataReader = (*Message)(nil) + +// NewMessage returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessage(header nethttp.Header, body io.ReadCloser) *Message { + m := Message{Header: header} + if body != nil { + m.BodyReader = body + } + if m.format = format.Lookup(header.Get(ContentType)); m.format == nil { + m.version = specs.Version(m.Header.Get(specs.PrefixedSpecVersionName())) + } + return &m +} + +// NewMessageFromHttpRequest returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessageFromHttpRequest(req *nethttp.Request) *Message { + if req == nil { + return nil + } + message := NewMessage(req.Header, req.Body) + message.ctx = req.Context() + return message +} + +// NewMessageFromHttpResponse returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessageFromHttpResponse(resp *nethttp.Response) *Message { + if resp == nil { + return nil + } + msg := NewMessage(resp.Header, resp.Body) + return msg +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + if m.format == format.JSONBatch { + return binding.EncodingBatch + } + return binding.EncodingStructured + } + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.format == nil { + return binding.ErrNotStructured + } else { + return encoder.SetStructuredEvent(ctx, m.format, m.BodyReader) + } +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) (err error) { + if m.version == nil { + return binding.ErrNotBinary + } + + for k, v := range m.Header { + attr := m.version.Attribute(k) + if attr != nil { + err = encoder.SetAttribute(attr, v[0]) + } else if strings.HasPrefix(k, prefix) { + // Trim Prefix + To lower + var b strings.Builder + b.Grow(len(k) - len(prefix)) + b.WriteRune(unicode.ToLower(rune(k[len(prefix)]))) + b.WriteString(k[len(prefix)+1:]) + err = encoder.SetExtension(b.String(), v[0]) + } + if err != nil { + return err + } + } + + if m.BodyReader != nil { + err = encoder.SetData(m.BodyReader) + if err != nil { + return err + } + } + + return +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr != nil { + h := m.Header[attributeHeadersMapping[attr.Name()]] + if h != nil { + return attr, h[0] + } + return attr, nil + } + return nil, nil +} + +func (m *Message) GetExtension(name string) interface{} { + h := m.Header[extNameToHeaderName(name)] + if h != nil { + return h[0] + 
} + return nil +} + +func (m *Message) Context() context.Context { + return m.ctx +} + +func (m *Message) Finish(err error) error { + if m.BodyReader != nil { + _ = m.BodyReader.Close() + } + if m.OnFinish != nil { + return m.OnFinish(err) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go new file mode 100644 index 00000000..6582af3e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go @@ -0,0 +1,300 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "fmt" + "net" + nethttp "net/http" + "net/url" + "strings" + "time" +) + +// Option is the function signature required to be considered an http.Option. +type Option func(*Protocol) error + +// WithTarget sets the outbound recipient of cloudevents when using an HTTP +// request. +func WithTarget(targetUrl string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http target option can not set nil protocol") + } + targetUrl = strings.TrimSpace(targetUrl) + if targetUrl != "" { + var err error + var target *url.URL + target, err = url.Parse(targetUrl) + if err != nil { + return fmt.Errorf("http target option failed to parse target url: %s", err.Error()) + } + + p.Target = target + + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + p.RequestTemplate.URL = target + + return nil + } + return fmt.Errorf("http target option was empty string") + } +} + +// WithHeader sets an additional default outbound header for all cloudevents +// when using an HTTP request. +func WithHeader(key, value string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http header option can not set nil protocol") + } + key = strings.TrimSpace(key) + if key != "" { + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + if p.RequestTemplate.Header == nil { + p.RequestTemplate.Header = nethttp.Header{} + } + p.RequestTemplate.Header.Add(key, value) + return nil + } + return fmt.Errorf("http header option was empty string") + } +} + +// WithShutdownTimeout sets the shutdown timeout when the http server is being shutdown. +func WithShutdownTimeout(timeout time.Duration) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http shutdown timeout option can not set nil protocol") + } + p.ShutdownTimeout = timeout + return nil + } +} + +func checkListen(p *Protocol, prefix string) error { + switch { + case p.listener.Load() != nil: + return fmt.Errorf("error setting %v: listener already set", prefix) + } + return nil +} + +// WithPort sets the listening port for StartReceiver. +// Only one of WithListener or WithPort is allowed. +func WithPort(port int) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http port option can not set nil protocol") + } + if port < 0 || port > 65535 { + return fmt.Errorf("http port option was given an invalid port: %d", port) + } + if err := checkListen(p, "http port option"); err != nil { + return err + } + p.Port = port + return nil + } +} + +// WithListener sets the listener for StartReceiver. +// Only one of WithListener or WithPort is allowed. 
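+//
+// For example (sketch; error handling elided):
+//
+//	l, _ := net.Listen("tcp", "127.0.0.1:0") // any free port
+//	p, _ := New(WithListener(l))
+//	_ = p.GetListeningPort() // reports the port the OS picked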
+func WithListener(l net.Listener) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http listener option can not set nil protocol") + } + if err := checkListen(p, "http listener"); err != nil { + return err + } + p.listener.Store(l) + return nil + } +} + +// WithPath sets the path to receive cloudevents on for HTTP transports. +func WithPath(path string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http path option can not set nil protocol") + } + path = strings.TrimSpace(path) + if len(path) == 0 { + return fmt.Errorf("http path option was given an invalid path: %q", path) + } + p.Path = path + return nil + } +} + +// WithMethod sets the HTTP verb (GET, POST, PUT, etc.) to use +// when using an HTTP request. +func WithMethod(method string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http method option can not set nil protocol") + } + method = strings.TrimSpace(method) + if method != "" { + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{} + } + p.RequestTemplate.Method = method + return nil + } + return fmt.Errorf("http method option was empty string") + } +} + +// Middleware is a function that takes an existing http.Handler and wraps it in middleware, +// returning the wrapped http.Handler. +type Middleware func(next nethttp.Handler) nethttp.Handler + +// WithMiddleware adds an HTTP middleware to the transport. It may be specified multiple times. +// Middleware is applied to everything before it. For example +// `NewClient(WithMiddleware(foo), WithMiddleware(bar))` would result in `bar(foo(original))`. +func WithMiddleware(middleware Middleware) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http middleware option can not set nil protocol") + } + p.middleware = append(p.middleware, middleware) + return nil + } +} + +// WithRoundTripper sets the HTTP RoundTripper. +func WithRoundTripper(roundTripper nethttp.RoundTripper) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http round tripper option can not set nil protocol") + } + p.roundTripper = roundTripper + return nil + } +} + +// WithRoundTripperDecorator decorates the default HTTP RoundTripper chosen. 
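+//
+// e.g. (sketch; loggingRoundTripper is a hypothetical wrapper type):
+//
+//	p, _ := New(WithRoundTripperDecorator(
+//		func(rt nethttp.RoundTripper) nethttp.RoundTripper {
+//			return loggingRoundTripper{next: rt}
+//		}))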
+func WithRoundTripperDecorator(decorator func(roundTripper nethttp.RoundTripper) nethttp.RoundTripper) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http round tripper option can not set nil protocol") + } + if p.roundTripper == nil { + if p.Client == nil { + p.roundTripper = nethttp.DefaultTransport + } else { + p.roundTripper = p.Client.Transport + } + } + p.roundTripper = decorator(p.roundTripper) + return nil + } +} + +// WithClient sets the protocol client +func WithClient(client nethttp.Client) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("client option can not set nil protocol") + } + p.Client = &client + return nil + } +} + +// WithGetHandlerFunc sets the http GET handler func +func WithGetHandlerFunc(fn nethttp.HandlerFunc) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http GET handler func can not set nil protocol") + } + p.GetHandlerFn = fn + return nil + } +} + +// WithOptionsHandlerFunc sets the http OPTIONS handler func +func WithOptionsHandlerFunc(fn nethttp.HandlerFunc) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.OptionsHandlerFn = fn + return nil + } +} + +// WithDefaultOptionsHandlerFunc sets the options handler to be the built in handler and configures the options. +// methods: the supported methods reported to OPTIONS caller. +// rate: the rate limit reported to OPTIONS caller. +// origins: the prefix of the accepted origins, or "*". +// callback: preform the callback to ACK the OPTIONS request. +func WithDefaultOptionsHandlerFunc(methods []string, rate int, origins []string, callback bool) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.OptionsHandlerFn = p.OptionsHandler + p.WebhookConfig = &WebhookConfig{ + AllowedMethods: methods, + AllowedRate: &rate, + AllowedOrigins: origins, + AutoACKCallback: callback, + } + return nil + } +} + +// IsRetriable is a custom function that can be used to override the +// default retriable status codes. +type IsRetriable func(statusCode int) bool + +// WithIsRetriableFunc sets the function that gets called to determine if an +// error should be retried. If not set, the defaultIsRetriableFunc is used. +func WithIsRetriableFunc(isRetriable IsRetriable) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("isRetriable handler func can not set nil protocol") + } + if isRetriable == nil { + return fmt.Errorf("isRetriable handler can not be nil") + } + p.isRetriableFunc = isRetriable + return nil + } +} + +func WithRateLimiter(rl RateLimiter) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.limiter = rl + return nil + } +} + +// WithRequestDataAtContextMiddleware adds to the Context RequestData. +// This enables a user's dispatch handler to inspect HTTP request information by +// retrieving it from the Context. 
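+//
+// A hedged sketch; ctx below is the context handed to the dispatch handler:
+//
+//	p, _ := New(WithRequestDataAtContextMiddleware())
+//	// ... inside the event handler:
+//	if rd := RequestDataFromContext(ctx); rd != nil {
+//		_ = rd.URL.Path // path the CloudEvent arrived on
+//	}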
+func WithRequestDataAtContextMiddleware() Option { + return WithMiddleware(func(next nethttp.Handler) nethttp.Handler { + return nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) { + ctx := WithRequestDataAtContext(r.Context(), r) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + }) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go new file mode 100644 index 00000000..7ee3b8fe --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go @@ -0,0 +1,411 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +const ( + // DefaultShutdownTimeout defines the default timeout given to the http.Server when calling Shutdown. + DefaultShutdownTimeout = time.Minute * 1 +) + +type msgErr struct { + msg *Message + respFn protocol.ResponseFn + err error +} + +// Default error codes that we retry on - string isn't used, it's just there so +// people know what each error code's title is. +// To modify this use Option +var defaultRetriableErrors = map[int]string{ + 404: "Not Found", + 413: "Payload Too Large", + 425: "Too Early", + 429: "Too Many Requests", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", +} + +// Protocol acts as both a http client and a http handler. +type Protocol struct { + Target *url.URL + RequestTemplate *http.Request + Client *http.Client + incoming chan msgErr + + // OptionsHandlerFn handles the OPTIONS method requests and is intended to + // implement the abuse protection spec: + // https://github.com/cloudevents/spec/blob/v1.0/http-webhook.md#4-abuse-protection + OptionsHandlerFn http.HandlerFunc + WebhookConfig *WebhookConfig + + GetHandlerFn http.HandlerFunc + DeleteHandlerFn http.HandlerFunc + + // To support Opener: + + // ShutdownTimeout defines the timeout given to the http.Server when calling Shutdown. + // If 0, DefaultShutdownTimeout is used. + ShutdownTimeout time.Duration + + // Port is the port configured to bind the receiver to. Defaults to 8080. + // If you want to know the effective port you're listening to, use GetListeningPort() + Port int + // Path is the path to bind the receiver to. Defaults to "/". + Path string + + // Receive Mutex + reMu sync.Mutex + // Handler is the handler the http Server will use. Use this to reuse the + // http server. If nil, the Protocol will create a one. + Handler *http.ServeMux + + listener atomic.Value + roundTripper http.RoundTripper + server *http.Server + handlerRegistered bool + middleware []Middleware + limiter RateLimiter + + isRetriableFunc IsRetriable +} + +func New(opts ...Option) (*Protocol, error) { + p := &Protocol{ + incoming: make(chan msgErr), + Port: -1, + } + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + if p.Client == nil { + // This is how http.DefaultClient is initialized. We do not just use + // that because when WithRoundTripper is used, it will change the client's + // transport, which would cause that transport to be used process-wide. 
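+	// (Illustrative: had we shared http.DefaultClient, a caller doing
+	//	New(WithRoundTripper(customRT)) // customRT is hypothetical
+	// would have swapped the transport for every other user of that
+	// client in the process.)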
+ p.Client = &http.Client{} + } + + if p.roundTripper != nil { + p.Client.Transport = p.roundTripper + } + + if p.ShutdownTimeout == 0 { + p.ShutdownTimeout = DefaultShutdownTimeout + } + + if p.isRetriableFunc == nil { + p.isRetriableFunc = defaultIsRetriableFunc + } + + if p.limiter == nil { + p.limiter = noOpLimiter{} + } + + return p, nil +} + +// NewObserved creates an HTTP protocol with trace propagating middleware. +// Deprecated: now this behaves like New and it will be removed in future releases, +// setup the http observed protocol using the opencensus separate module NewObservedHttp +var NewObserved = New + +func (p *Protocol) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +// Send implements binding.Sender +func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error { + if ctx == nil { + return fmt.Errorf("nil Context") + } else if m == nil { + return fmt.Errorf("nil Message") + } + + msg, err := p.Request(ctx, m, transformers...) + if msg != nil { + defer func() { _ = msg.Finish(err) }() + } + if err != nil && !protocol.IsACK(err) { + var res *Result + if protocol.ResultAs(err, &res) { + if message, ok := msg.(*Message); ok { + buf := new(bytes.Buffer) + buf.ReadFrom(message.BodyReader) + errorStr := buf.String() + // If the error is not wrapped, then append the original error string. + if og, ok := err.(*Result); ok { + og.Format = og.Format + "%s" + og.Args = append(og.Args, errorStr) + err = og + } else { + err = NewResult(res.StatusCode, "%w: %s", err, errorStr) + } + } + } + } + return err +} + +// Request implements binding.Requester +func (p *Protocol) Request(ctx context.Context, m binding.Message, transformers ...binding.Transformer) (binding.Message, error) { + if ctx == nil { + return nil, fmt.Errorf("nil Context") + } else if m == nil { + return nil, fmt.Errorf("nil Message") + } + + var err error + defer func() { _ = m.Finish(err) }() + + req := p.makeRequest(ctx) + + if p.Client == nil || req == nil || req.URL == nil { + return nil, fmt.Errorf("not initialized: %#v", p) + } + + if err = WriteRequest(ctx, m, req, transformers...); err != nil { + return nil, err + } + + return p.do(ctx, req) +} + +func (p *Protocol) makeRequest(ctx context.Context) *http.Request { + req := &http.Request{ + Method: http.MethodPost, + Header: HeaderFrom(ctx), + } + + if p.RequestTemplate != nil { + req.Method = p.RequestTemplate.Method + req.URL = p.RequestTemplate.URL + req.Close = p.RequestTemplate.Close + req.Host = p.RequestTemplate.Host + copyHeadersEnsure(p.RequestTemplate.Header, &req.Header) + } + + if p.Target != nil { + req.URL = p.Target + } + + // Override the default request with target from context. + if target := cecontext.TargetFrom(ctx); target != nil { + req.URL = target + } + return req.WithContext(ctx) +} + +// Ensure to is a non-nil map before copying +func copyHeadersEnsure(from http.Header, to *http.Header) { + if len(from) > 0 { + if *to == nil { + *to = http.Header{} + } + copyHeaders(from, *to) + } +} + +func copyHeaders(from, to http.Header) { + if from == nil || to == nil { + return + } + for header, values := range from { + for _, value := range values { + to.Add(header, value) + } + } +} + +// Receive the next incoming HTTP request as a CloudEvent. +// Returns non-nil error if the incoming HTTP request fails to parse as a CloudEvent +// Returns io.EOF if the receiver is closed. 
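+//
+// Typical polling loop (hedged sketch; most applications should use the
+// client package rather than calling Receive directly):
+//
+//	for {
+//		msg, err := p.Receive(ctx)
+//		if err == io.EOF {
+//			break // receiver closed
+//		}
+//		if err != nil || msg == nil {
+//			continue // request did not parse as a CloudEvent
+//		}
+//		// ... use msg, then release it:
+//		_ = msg.Finish(nil)
+//	}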
+func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + if ctx == nil { + return nil, fmt.Errorf("nil Context") + } + + msg, fn, err := p.Respond(ctx) + // No-op the response when finish is invoked. + if msg != nil { + return binding.WithFinish(msg, func(err error) { + if fn != nil { + _ = fn(ctx, nil, nil) + } + }), err + } else { + return nil, err + } +} + +// Respond receives the next incoming HTTP request as a CloudEvent and waits +// for the response callback to invoked before continuing. +// Returns non-nil error if the incoming HTTP request fails to parse as a CloudEvent +// Returns io.EOF if the receiver is closed. +func (p *Protocol) Respond(ctx context.Context) (binding.Message, protocol.ResponseFn, error) { + if ctx == nil { + return nil, nil, fmt.Errorf("nil Context") + } + + select { + case in, ok := <-p.incoming: + if !ok { + return nil, nil, io.EOF + } + + if in.msg == nil { + return nil, in.respFn, in.err + } + return in.msg, in.respFn, in.err + + case <-ctx.Done(): + return nil, nil, io.EOF + } +} + +// ServeHTTP implements http.Handler. +// Blocks until ResponseFn is invoked. +func (p *Protocol) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // always apply limiter first using req context + ok, reset, err := p.limiter.Allow(req.Context(), req) + if err != nil { + p.incoming <- msgErr{msg: nil, err: fmt.Errorf("unable to acquire rate limit token: %w", err)} + rw.WriteHeader(http.StatusInternalServerError) + return + } + + if !ok { + rw.Header().Add("Retry-After", strconv.Itoa(int(reset))) + http.Error(rw, "limit exceeded", 429) + return + } + + // Filter the GET style methods: + switch req.Method { + case http.MethodOptions: + if p.OptionsHandlerFn == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + p.OptionsHandlerFn(rw, req) + return + + case http.MethodGet: + if p.GetHandlerFn == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + p.GetHandlerFn(rw, req) + return + + case http.MethodDelete: + if p.DeleteHandlerFn == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + p.DeleteHandlerFn(rw, req) + return + } + + m := NewMessageFromHttpRequest(req) + if m == nil { + // Should never get here unless ServeHTTP is called directly. + p.incoming <- msgErr{msg: nil, err: binding.ErrUnknownEncoding} + rw.WriteHeader(http.StatusBadRequest) + return // if there was no message, return. 
+ } + + var finishErr error + m.OnFinish = func(err error) error { + finishErr = err + return nil + } + + wg := sync.WaitGroup{} + wg.Add(1) + var fn protocol.ResponseFn = func(ctx context.Context, respMsg binding.Message, res protocol.Result, transformers ...binding.Transformer) error { + // Unblock the ServeHTTP after the reply is written + defer func() { + wg.Done() + }() + + if finishErr != nil { + http.Error(rw, fmt.Sprintf("Cannot forward CloudEvent: %s", finishErr), http.StatusInternalServerError) + return finishErr + } + + status := http.StatusOK + var errMsg string + if res != nil { + var result *Result + switch { + case protocol.ResultAs(res, &result): + if result.StatusCode > 100 && result.StatusCode < 600 { + status = result.StatusCode + } + errMsg = fmt.Errorf(result.Format, result.Args...).Error() + case !protocol.IsACK(res): + // Map client errors to http status code + validationError := event.ValidationError{} + if errors.As(res, &validationError) { + status = http.StatusBadRequest + rw.Header().Set("content-type", "text/plain") + rw.WriteHeader(status) + _, _ = rw.Write([]byte(validationError.Error())) + return validationError + } else if errors.Is(res, binding.ErrUnknownEncoding) { + status = http.StatusUnsupportedMediaType + } else { + status = http.StatusInternalServerError + } + } + } + + if respMsg != nil { + err := WriteResponseWriter(ctx, respMsg, status, rw, transformers...) + return respMsg.Finish(err) + } + + rw.WriteHeader(status) + if _, err := rw.Write([]byte(errMsg)); err != nil { + return err + } + return nil + } + + p.incoming <- msgErr{msg: m, respFn: fn} // Send to Request + // Block until ResponseFn is invoked + wg.Wait() +} + +func defaultIsRetriableFunc(sc int) bool { + _, ok := defaultRetriableErrors[sc] + return ok +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go new file mode 100644 index 00000000..04ef9691 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go @@ -0,0 +1,143 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "fmt" + "net" + "net/http" + "strings" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +var _ protocol.Opener = (*Protocol)(nil) + +func (p *Protocol) OpenInbound(ctx context.Context) error { + p.reMu.Lock() + defer p.reMu.Unlock() + + if p.Handler == nil { + p.Handler = http.NewServeMux() + } + + if !p.handlerRegistered { + // handler.Handle might panic if the user tries to use the same path as the sdk. + p.Handler.Handle(p.GetPath(), p) + p.handlerRegistered = true + } + + // After listener is invok + listener, err := p.listen() + if err != nil { + return err + } + + p.server = &http.Server{ + Addr: listener.Addr().String(), + Handler: attachMiddleware(p.Handler, p.middleware), + ReadTimeout: DefaultTimeout, + WriteTimeout: DefaultTimeout, + } + + // Shutdown + defer func() { + _ = p.server.Close() + p.server = nil + }() + + errChan := make(chan error) + go func() { + errChan <- p.server.Serve(listener) + }() + + // wait for the server to return or ctx.Done(). + select { + case <-ctx.Done(): + // Try a graceful shutdown. 
+ ctx, cancel := context.WithTimeout(context.Background(), p.ShutdownTimeout) + defer cancel() + + shdwnErr := p.server.Shutdown(ctx) + if shdwnErr != nil { + shdwnErr = fmt.Errorf("shutting down HTTP server: %w", shdwnErr) + } + + // Wait for server goroutine to exit + rntmErr := <-errChan + if rntmErr != nil && rntmErr != http.ErrServerClosed { + rntmErr = fmt.Errorf("server failed during shutdown: %w", rntmErr) + + if shdwnErr != nil { + return fmt.Errorf("combined error during shutdown of HTTP server: %w, %v", + shdwnErr, rntmErr) + } + + return rntmErr + } + + return shdwnErr + + case err := <-errChan: + if err != nil { + return fmt.Errorf("during runtime of HTTP server: %w", err) + } + return nil + } +} + +// GetListeningPort returns the listening port. +// Returns -1 if it's not listening. +func (p *Protocol) GetListeningPort() int { + if listener := p.listener.Load(); listener != nil { + if tcpAddr, ok := listener.(net.Listener).Addr().(*net.TCPAddr); ok { + return tcpAddr.Port + } + } + return -1 +} + +// listen if not already listening, update t.Port +func (p *Protocol) listen() (net.Listener, error) { + if p.listener.Load() == nil { + port := 8080 + if p.Port != -1 { + port = p.Port + if port < 0 || port > 65535 { + return nil, fmt.Errorf("invalid port %d", port) + } + } + var err error + var listener net.Listener + if listener, err = net.Listen("tcp", fmt.Sprintf(":%d", port)); err != nil { + return nil, err + } + p.listener.Store(listener) + return listener, nil + } + return p.listener.Load().(net.Listener), nil +} + +// GetPath returns the path the transport is hosted on. If the path is '/', +// the transport will handle requests on any URI. To discover the true path +// a request was received on, inspect the context from Receive(cxt, ...) with +// TransportContextFrom(ctx). +func (p *Protocol) GetPath() string { + path := strings.TrimSpace(p.Path) + if len(path) > 0 { + return path + } + return "/" // default +} + +// attachMiddleware attaches the HTTP middleware to the specified handler. +func attachMiddleware(h http.Handler, middleware []Middleware) http.Handler { + for _, m := range middleware { + h = m(h) + } + return h +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go new file mode 100644 index 00000000..9c4c10a2 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go @@ -0,0 +1,34 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "net/http" +) + +type RateLimiter interface { + // Allow attempts to take one token from the rate limiter for the specified + // request. It returns ok when this operation was successful. In case ok is + // false, reset will indicate the time in seconds when it is safe to perform + // another attempt. An error is returned when this operation failed, e.g. due to + // a backend error. + Allow(ctx context.Context, r *http.Request) (ok bool, reset uint64, err error) + // Close terminates rate limiter and cleans up any data structures or + // connections that may remain open. After a store is stopped, Take() should + // always return zero values. 
+ Close(ctx context.Context) error +} + +type noOpLimiter struct{} + +func (n noOpLimiter) Allow(ctx context.Context, r *http.Request) (bool, uint64, error) { + return true, 0, nil +} + +func (n noOpLimiter) Close(ctx context.Context) error { + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go new file mode 100644 index 00000000..21fc7e9b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go @@ -0,0 +1,126 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "time" + + "go.uber.org/zap" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +func (p *Protocol) do(ctx context.Context, req *http.Request) (binding.Message, error) { + params := cecontext.RetriesFrom(ctx) + + switch params.Strategy { + case cecontext.BackoffStrategyConstant, cecontext.BackoffStrategyLinear, cecontext.BackoffStrategyExponential: + return p.doWithRetry(ctx, params, req) + case cecontext.BackoffStrategyNone: + fallthrough + default: + return p.doOnce(req) + } +} + +func (p *Protocol) doOnce(req *http.Request) (binding.Message, protocol.Result) { + resp, err := p.Client.Do(req) + if err != nil { + return nil, protocol.NewReceipt(false, "%w", err) + } + + var result protocol.Result + if resp.StatusCode/100 == 2 { + result = protocol.ResultACK + } else { + result = protocol.ResultNACK + } + + return NewMessage(resp.Header, resp.Body), NewResult(resp.StatusCode, "%w", result) +} + +func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParams, req *http.Request) (binding.Message, error) { + start := time.Now() + retry := 0 + results := make([]protocol.Result, 0) + + var ( + body []byte + err error + ) + + if req != nil && req.Body != nil { + defer func() { + if err = req.Body.Close(); err != nil { + cecontext.LoggerFrom(ctx).Warnw("could not close request body", zap.Error(err)) + } + }() + body, err = io.ReadAll(req.Body) + if err != nil { + panic(err) + } + resetBody(req, body) + } + + for { + msg, result := p.doOnce(req) + + // Fast track common case. + if protocol.IsACK(result) { + return msg, NewRetriesResult(result, retry, start, results) + } + + var httpResult *Result + if errors.As(result, &httpResult) { + sc := httpResult.StatusCode + if !p.isRetriableFunc(sc) { + cecontext.LoggerFrom(ctx).Debugw("status code not retryable, will not try again", + zap.Error(httpResult), + zap.Int("statusCode", sc)) + return msg, NewRetriesResult(result, retry, start, results) + } + } + + // total tries = retry + 1 + if err = params.Backoff(ctx, retry+1); err != nil { + // do not try again. + cecontext.LoggerFrom(ctx).Debugw("backoff error, will not try again", zap.Error(err)) + return msg, NewRetriesResult(result, retry, start, results) + } + + retry++ + resetBody(req, body) + results = append(results, result) + if msg != nil { + // avoid leak, forget message, ignore error + _ = msg.Finish(nil) + } + } +} + +// reset body to allow it to be read multiple times, e.g. 
when retrying http +// requests +func resetBody(req *http.Request, body []byte) { + if req == nil || req.Body == nil { + return + } + + req.Body = io.NopCloser(bytes.NewReader(body)) + + // do not modify existing GetBody function + if req.GetBody == nil { + req.GetBody = func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(body)), nil + } + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go new file mode 100644 index 00000000..7a0b2626 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go @@ -0,0 +1,60 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "errors" + "fmt" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// NewResult returns a fully populated http Result that should be used as +// a transport.Result. +func NewResult(statusCode int, messageFmt string, args ...interface{}) protocol.Result { + return &Result{ + StatusCode: statusCode, + Format: messageFmt, + Args: args, + } +} + +// Result wraps the fields required to make adjustments for http Responses. +type Result struct { + StatusCode int + Format string + Args []interface{} +} + +// make sure Result implements error. +var _ error = (*Result)(nil) + +// Is returns if the target error is a Result type checking target. +func (e *Result) Is(target error) bool { + if o, ok := target.(*Result); ok { + return e.StatusCode == o.StatusCode + } + + // Special case for nil == ACK + if o, ok := target.(*protocol.Receipt); ok { + if e == nil && o.ACK { + return true + } + } + + // Allow for wrapped errors. + if e != nil { + err := fmt.Errorf(e.Format, e.Args...) + return errors.Is(err, target) + } + return false +} + +// Error returns the string that is formed by using the format string with the +// provided args. +func (e *Result) Error() string { + return fmt.Sprintf("%d: %v", e.StatusCode, fmt.Errorf(e.Format, e.Args...)) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go new file mode 100644 index 00000000..f4046d52 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go @@ -0,0 +1,59 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "fmt" + "time" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// NewRetriesResult returns a http RetriesResult that should be used as +// a transport.Result without retries +func NewRetriesResult(result protocol.Result, retries int, startTime time.Time, attempts []protocol.Result) protocol.Result { + rr := &RetriesResult{ + Result: result, + Retries: retries, + Duration: time.Since(startTime), + } + if len(attempts) > 0 { + rr.Attempts = attempts + } + return rr +} + +// RetriesResult wraps the fields required to make adjustments for http Responses. +type RetriesResult struct { + // The last result + protocol.Result + + // Retries is the number of times the request was tried + Retries int + + // Duration records the time spent retrying. Exclude the successful request (if any) + Duration time.Duration + + // Attempts of all failed requests. Exclude last result. + Attempts []protocol.Result +} + +// make sure RetriesResult implements error. +var _ error = (*RetriesResult)(nil) + +// Is returns if the target error is a RetriesResult type checking target. 
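+
+// NOTE: illustrative example added for this draft, not part of the upstream
+// SDK. It shows how a caller can unwrap a RetriesResult to inspect delivery
+// metadata; the result value res is hypothetical, e.g. returned by a client
+// Send configured with a retry backoff:
+func exampleInspectRetries(res protocol.Result) {
+	var rr *RetriesResult
+	if protocol.ResultAs(res, &rr) {
+		fmt.Printf("acked=%v retries=%d duration=%v failed attempts=%d\n",
+			protocol.IsACK(rr.Result), rr.Retries, rr.Duration, len(rr.Attempts))
+	}
+}
+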
+func (e *RetriesResult) Is(target error) bool { + return protocol.ResultIs(e.Result, target) +} + +// Error returns the string that is formed by using the format string with the +// provided args. +func (e *RetriesResult) Error() string { + if e.Retries == 0 { + return e.Result.Error() + } + return fmt.Sprintf("%s (%dx)", e.Result.Error(), e.Retries) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go new file mode 100644 index 00000000..350fc1cf --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go @@ -0,0 +1,89 @@ +/* + Copyright 2022 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "encoding/json" + nethttp "net/http" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/event" +) + +// NewEventFromHTTPRequest returns an Event. +func NewEventFromHTTPRequest(req *nethttp.Request) (*event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvent(context.Background(), msg) +} + +// NewEventFromHTTPResponse returns an Event. +func NewEventFromHTTPResponse(resp *nethttp.Response) (*event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvent(context.Background(), msg) +} + +// NewEventsFromHTTPRequest returns a batched set of Events from a HTTP Request +func NewEventsFromHTTPRequest(req *nethttp.Request) ([]event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewEventsFromHTTPResponse returns a batched set of Events from a HTTP Response +func NewEventsFromHTTPResponse(resp *nethttp.Response) ([]event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewHTTPRequestFromEvent creates a http.Request object that can be used with any http.Client for a singular event. +// This is an HTTP POST action to the provided url. +func NewHTTPRequestFromEvent(ctx context.Context, url string, event event.Event) (*nethttp.Request, error) { + if err := event.Validate(); err != nil { + return nil, err + } + + req, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, nil) + if err != nil { + return nil, err + } + if err := WriteRequest(ctx, (*binding.EventMessage)(&event), req); err != nil { + return nil, err + } + + return req, nil +} + +// NewHTTPRequestFromEvents creates a http.Request object that can be used with any http.Client for sending +// a batched set of events. This is an HTTP POST action to the provided url. +func NewHTTPRequestFromEvents(ctx context.Context, url string, events []event.Event) (*nethttp.Request, error) { + // Sending batch events is quite straightforward, as there is only JSON format, so a simple implementation. + for _, e := range events { + if err := e.Validate(); err != nil { + return nil, err + } + } + var buffer bytes.Buffer + err := json.NewEncoder(&buffer).Encode(events) + if err != nil { + return nil, err + } + + request, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, &buffer) + if err != nil { + return nil, err + } + + request.Header.Set(ContentType, event.ApplicationCloudEventsBatchJSON) + + return request, nil +} + +// IsHTTPBatch returns if the current http.Request or http.Response is a batch event operation, by checking the +// header `Content-Type` value. 
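+
+// NOTE: illustrative example added for this draft, not part of the upstream
+// SDK. A minimal handler that uses the helpers above to decode either a
+// single event or a JSON batch, depending on the Content-Type header:
+func exampleHandler(w nethttp.ResponseWriter, r *nethttp.Request) {
+	if IsHTTPBatch(r.Header) {
+		events, err := NewEventsFromHTTPRequest(r)
+		if err != nil {
+			nethttp.Error(w, err.Error(), nethttp.StatusBadRequest)
+			return
+		}
+		_ = events // process the batch
+		return
+	}
+	e, err := NewEventFromHTTPRequest(r)
+	if err != nil {
+		nethttp.Error(w, err.Error(), nethttp.StatusBadRequest)
+		return
+	}
+	_ = e // process the single event
+}
+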
+func IsHTTPBatch(header nethttp.Header) bool { + return header.Get(ContentType) == event.ApplicationCloudEventsBatchJSON +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go new file mode 100644 index 00000000..f22259a3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go @@ -0,0 +1,142 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "io" + "net/http" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" +) + +// WriteRequest fills the provided httpRequest with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). +func WriteRequest(ctx context.Context, m binding.Message, httpRequest *http.Request, transformers ...binding.Transformer) error { + structuredWriter := (*httpRequestWriter)(httpRequest) + binaryWriter := (*httpRequestWriter)(httpRequest) + + _, err := binding.Write( + ctx, + m, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +type httpRequestWriter http.Request + +func (b *httpRequestWriter) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error { + b.Header.Set(ContentType, format.MediaType()) + return b.setBody(event) +} + +func (b *httpRequestWriter) Start(ctx context.Context) error { + return nil +} + +func (b *httpRequestWriter) End(ctx context.Context) error { + return nil +} + +func (b *httpRequestWriter) SetData(data io.Reader) error { + return b.setBody(data) +} + +// setBody is a cherry-pick of the implementation in http.NewRequestWithContext +func (b *httpRequestWriter) setBody(body io.Reader) error { + rc, ok := body.(io.ReadCloser) + if !ok && body != nil { + rc = io.NopCloser(body) + } + b.Body = rc + if body != nil { + switch v := body.(type) { + case *bytes.Buffer: + b.ContentLength = int64(v.Len()) + buf := v.Bytes() + b.GetBody = func() (io.ReadCloser, error) { + r := bytes.NewReader(buf) + return io.NopCloser(r), nil + } + case *bytes.Reader: + b.ContentLength = int64(v.Len()) + snapshot := *v + b.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return io.NopCloser(&r), nil + } + case *strings.Reader: + b.ContentLength = int64(v.Len()) + snapshot := *v + b.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return io.NopCloser(&r), nil + } + default: + // This is where we'd set it to -1 (at least + // if body != NoBody) to mean unknown, but + // that broke people during the Go 1.8 testing + // period. People depend on it being 0 I + // guess. Maybe retry later. See Issue 18117. + } + // For client requests, Request.ContentLength of 0 + // means either actually 0, or unknown. The only way + // to explicitly say that the ContentLength is zero is + // to set the Body to nil. But turns out too much code + // depends on NewRequest returning a non-nil Body, + // so we use a well-known ReadCloser variable instead + // and have the http package also treat that sentinel + // variable to mean explicitly zero. 
+		if b.GetBody != nil && b.ContentLength == 0 {
+			b.Body = http.NoBody
+			b.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil }
+		}
+	}
+	return nil
+}
+
+func (b *httpRequestWriter) SetAttribute(attribute spec.Attribute, value interface{}) error {
+	mapping := attributeHeadersMapping[attribute.Name()]
+	if value == nil {
+		delete(b.Header, mapping)
+		return nil
+	}
+
+	// Http headers, everything is a string!
+	s, err := types.Format(value)
+	if err != nil {
+		return err
+	}
+	b.Header[mapping] = append(b.Header[mapping], s)
+	return nil
+}
+
+func (b *httpRequestWriter) SetExtension(name string, value interface{}) error {
+	if value == nil {
+		delete(b.Header, extNameToHeaderName(name))
+		return nil
+	}
+	// Http headers, everything is a string!
+	s, err := types.Format(value)
+	if err != nil {
+		return err
+	}
+	b.Header[extNameToHeaderName(name)] = []string{s}
+	return nil
+}
+
+var (
+	_ binding.StructuredWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface
+	_ binding.BinaryWriter     = (*httpRequestWriter)(nil) // Test it conforms to the interface
+)
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go
new file mode 100644
index 00000000..41385dab
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go
@@ -0,0 +1,126 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package http
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/cloudevents/sdk-go/v2/binding"
+	"github.com/cloudevents/sdk-go/v2/binding/format"
+	"github.com/cloudevents/sdk-go/v2/binding/spec"
+	"github.com/cloudevents/sdk-go/v2/types"
+)
+
+// WriteResponseWriter writes out to the provided httpResponseWriter with the message m.
+// Using context you can tweak the encoding processing (more details on binding.Write documentation).
+func WriteResponseWriter(ctx context.Context, m binding.Message, status int, rw http.ResponseWriter, transformers ...binding.Transformer) error {
+	if status < 200 || status >= 600 {
+		status = http.StatusOK
+	}
+	writer := &httpResponseWriter{rw: rw, status: status}
+
+	_, err := binding.Write(
+		ctx,
+		m,
+		writer,
+		writer,
+		transformers...,
+	)
+	return err
+}
+
+type httpResponseWriter struct {
+	rw     http.ResponseWriter
+	status int
+	body   io.Reader
+}
+
+func (b *httpResponseWriter) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error {
+	b.rw.Header().Set(ContentType, format.MediaType())
+	b.body = event
+	return b.finalizeWriter()
+}
+
+func (b *httpResponseWriter) Start(ctx context.Context) error {
+	return nil
+}
+
+func (b *httpResponseWriter) SetAttribute(attribute spec.Attribute, value interface{}) error {
+	mapping := attributeHeadersMapping[attribute.Name()]
+	if value == nil {
+		delete(b.rw.Header(), mapping)
+		return nil
+	}
+
+	// Http headers, everything is a string!
+	s, err := types.Format(value)
+	if err != nil {
+		return err
+	}
+	b.rw.Header()[mapping] = append(b.rw.Header()[mapping], s)
+	return nil
+}
+
+func (b *httpResponseWriter) SetExtension(name string, value interface{}) error {
+	if value == nil {
+		delete(b.rw.Header(), extNameToHeaderName(name))
+		return nil
+	}
+	// Http headers, everything is a string!
+ s, err := types.Format(value) + if err != nil { + return err + } + b.rw.Header()[extNameToHeaderName(name)] = []string{s} + return nil +} + +func (b *httpResponseWriter) SetData(reader io.Reader) error { + b.body = reader + return nil +} + +func (b *httpResponseWriter) finalizeWriter() error { + if b.body != nil { + // Try to figure it out if we have a content-length + contentLength := -1 + switch v := b.body.(type) { + case *bytes.Buffer: + contentLength = v.Len() + case *bytes.Reader: + contentLength = v.Len() + case *strings.Reader: + contentLength = v.Len() + } + + if contentLength != -1 { + b.rw.Header().Add("Content-length", strconv.Itoa(contentLength)) + } + + // Finalize the headers. + b.rw.WriteHeader(b.status) + + // Write body. + _, err := io.Copy(b.rw, b.body) + if err != nil { + return err + } + } else { + // Finalize the headers. + b.rw.WriteHeader(b.status) + } + return nil +} + +func (b *httpResponseWriter) End(ctx context.Context) error { + return b.finalizeWriter() +} + +var _ binding.StructuredWriter = (*httpResponseWriter)(nil) // Test it conforms to the interface +var _ binding.BinaryWriter = (*httpResponseWriter)(nil) // Test it conforms to the interface diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go new file mode 100644 index 00000000..e7a74294 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go @@ -0,0 +1,54 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Receiver receives messages. +type Receiver interface { + // Receive blocks till a message is received or ctx expires. + // Receive can be invoked safely from different goroutines. + // + // A non-nil error means the receiver is closed. + // io.EOF means it closed cleanly, any other value indicates an error. + // The caller is responsible for `Finish()` the returned message + Receive(ctx context.Context) (binding.Message, error) +} + +// ReceiveCloser is a Receiver that can be closed. +type ReceiveCloser interface { + Receiver + Closer +} + +// ResponseFn is the function callback provided from Responder.Respond to allow +// for a receiver to "reply" to a message it receives. +// transformers are applied when the message is written on the wire. +type ResponseFn func(ctx context.Context, m binding.Message, r Result, transformers ...binding.Transformer) error + +// Responder receives messages and is given a callback to respond. +type Responder interface { + // Respond blocks till a message is received or ctx expires. + // Respond can be invoked safely from different goroutines. + // + // A non-nil error means the receiver is closed. + // io.EOF means it closed cleanly, any other value indicates an error. + // The caller is responsible for `Finish()` the returned message, + // while the protocol implementation is responsible for `Finish()` the response message. + // The caller MUST invoke ResponseFn, in order to avoid leaks. + // The correct flow for the caller is to finish the received message and then invoke the ResponseFn + Respond(ctx context.Context) (binding.Message, ResponseFn, error) +} + +// ResponderCloser is a Responder that can be closed. 
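+
+// NOTE: illustrative example added for this draft, not part of the upstream
+// SDK. A typical receive loop over any Receiver implementation; the caller
+// finishes each message once it has been handled:
+func exampleReceiveLoop(ctx context.Context, r Receiver) error {
+	for {
+		m, err := r.Receive(ctx)
+		if err != nil {
+			return err // io.EOF means the receiver closed cleanly.
+		}
+		// handle the message, then release it
+		_ = m.Finish(nil)
+	}
+}
+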
+type ResponderCloser interface { + Responder + Closer +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go new file mode 100644 index 00000000..4a058c96 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go @@ -0,0 +1,23 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" +) + +// Opener is the common interface for things that need to be opened. +type Opener interface { + // OpenInbound is a blocking call and ctx is used to stop the Inbound message Receiver/Responder. + // Closing the context won't close the Receiver/Responder, aka it won't invoke Close(ctx). + OpenInbound(ctx context.Context) error +} + +// Closer is the common interface for things that can be closed. +// After invoking Close(ctx), you cannot reuse the object you closed. +type Closer interface { + Close(ctx context.Context) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go new file mode 100644 index 00000000..e44fa432 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go @@ -0,0 +1,49 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Sender sends messages. +type Sender interface { + // Send a message. + // + // Send returns when the "outbound" message has been sent. The Sender may + // still be expecting acknowledgment or holding other state for the message. + // + // m.Finish() is called when sending is finished (both succeeded or failed): + // expected acknowledgments (or errors) have been received, the Sender is + // no longer holding any state for the message. + // m.Finish() may be called during or after Send(). + // + // transformers are applied when the message is written on the wire. + Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error +} + +// SendCloser is a Sender that can be closed. +type SendCloser interface { + Sender + Closer +} + +// Requester sends a message and receives a response +// +// Optional interface that may be implemented by protocols that support +// request/response correlation. +type Requester interface { + // Request sends m like Sender.Send() but also arranges to receive a response. + Request(ctx context.Context, m binding.Message, transformers ...binding.Transformer) (binding.Message, error) +} + +// RequesterCloser is a Requester that can be closed. +type RequesterCloser interface { + Requester + Closer +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go new file mode 100644 index 00000000..eae64e01 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go @@ -0,0 +1,127 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "errors" + "fmt" +) + +// Result leverages go's error wrapping. +type Result error + +// ResultIs reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. 
+// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +// (text from errors/wrap.go) +var ResultIs = errors.Is + +// ResultAs finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +// (text from errors/wrap.go) +var ResultAs = errors.As + +func NewResult(messageFmt string, args ...interface{}) Result { + return fmt.Errorf(messageFmt, args...) +} + +// IsACK true means the recipient acknowledged the event. +func IsACK(target Result) bool { + // special case, nil target also means ACK. + if target == nil { + return true + } + + return ResultIs(target, ResultACK) +} + +// IsNACK true means the recipient did not acknowledge the event. +func IsNACK(target Result) bool { + return ResultIs(target, ResultNACK) +} + +// IsUndelivered true means the target result is not an ACK/NACK, but some other +// error unrelated to delivery not from the intended recipient. Likely target +// is an error that represents some part of the protocol is misconfigured or +// the event that was attempting to be sent was invalid. +func IsUndelivered(target Result) bool { + if target == nil { + // Short-circuit nil result is ACK. + return false + } + return !ResultIs(target, ResultACK) && !ResultIs(target, ResultNACK) +} + +var ( + ResultACK = NewReceipt(true, "") + ResultNACK = NewReceipt(false, "") +) + +// NewReceipt returns a fully populated protocol Receipt that should be used as +// a transport.Result. This type holds the base ACK/NACK results. +func NewReceipt(ack bool, messageFmt string, args ...interface{}) Result { + return &Receipt{ + Err: fmt.Errorf(messageFmt, args...), + ACK: ack, + } +} + +// Receipt wraps the fields required to understand if a protocol event is acknowledged. +type Receipt struct { + Err error + ACK bool +} + +// make sure Result implements error. +var _ error = (*Receipt)(nil) + +// Is returns if the target error is a Result type checking target. +func (e *Receipt) Is(target error) bool { + if o, ok := target.(*Receipt); ok { + if e == nil { + // Special case nil e as ACK. + return o.ACK + } + return e.ACK == o.ACK + } + // Allow for wrapped errors. + if e != nil { + return errors.Is(e.Err, target) + } + return false +} + +// Error returns the string that is formed by using the format string with the +// provided args. 
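+
+// NOTE: illustrative example added for this draft, not part of the upstream
+// SDK. It shows the three delivery outcomes a caller can distinguish with
+// the helpers above:
+func exampleClassify(res Result) string {
+	switch {
+	case IsACK(res): // includes res == nil
+		return "acknowledged"
+	case IsNACK(res):
+		return "rejected by recipient"
+	default: // IsUndelivered(res)
+		return "undelivered: " + res.Error()
+	}
+}
+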
+func (e *Receipt) Error() string { + if e != nil { + return e.Err.Error() + } + return "" +} + +// Unwrap returns the wrapped error if exist or nil +func (e *Receipt) Unwrap() error { + if e != nil { + return errors.Unwrap(e.Err) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf b/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf new file mode 100644 index 00000000..d6f26955 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf @@ -0,0 +1,3 @@ +checks = [ + "all", "-ST1003", +] \ No newline at end of file diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go new file mode 100644 index 00000000..81462687 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go @@ -0,0 +1,41 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import "reflect" + +// Allocate allocates a new instance of type t and returns: +// asPtr is of type t if t is a pointer type and of type &t otherwise +// asValue is a Value of type t pointing to the same data as asPtr +func Allocate(obj interface{}) (asPtr interface{}, asValue reflect.Value) { + if obj == nil { + return nil, reflect.Value{} + } + + switch t := reflect.TypeOf(obj); t.Kind() { + case reflect.Ptr: + reflectPtr := reflect.New(t.Elem()) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + case reflect.Map: + reflectPtr := reflect.MakeMap(t) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + case reflect.String: + reflectPtr := reflect.New(t) + asPtr = "" + asValue = reflectPtr.Elem() + case reflect.Slice: + reflectPtr := reflect.MakeSlice(t, 0, 0) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + default: + reflectPtr := reflect.New(t) + asPtr = reflectPtr.Interface() + asValue = reflectPtr.Elem() + } + return +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go new file mode 100644 index 00000000..3a0a595a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go @@ -0,0 +1,45 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package types implements the CloudEvents type system. + +CloudEvents defines a set of abstract types for event context attributes. Each +type has a corresponding native Go type and a canonical string encoding. The +native Go types used to represent the CloudEvents types are: +bool, int32, string, []byte, *url.URL, time.Time + + +----------------+----------------+-----------------------------------+ + |CloudEvents Type|Native Type |Convertible From | + +================+================+===================================+ + |Bool |bool |bool | + +----------------+----------------+-----------------------------------+ + |Integer |int32 |Any numeric type with value in | + | | |range of int32 | + +----------------+----------------+-----------------------------------+ + |String |string |string | + +----------------+----------------+-----------------------------------+ + |Binary |[]byte |[]byte | + +----------------+----------------+-----------------------------------+ + |URI-Reference |*url.URL |url.URL, types.URIRef, types.URI | + +----------------+----------------+-----------------------------------+ + |URI |*url.URL |url.URL, types.URIRef, types.URI | + | | |Must be an absolute URI. 
| + +----------------+----------------+-----------------------------------+ + |Timestamp |time.Time |time.Time, types.Timestamp | + +----------------+----------------+-----------------------------------+ + +Extension attributes may be stored as a native type or a canonical string. The +To functions will convert to the desired from any convertible type +or from the canonical string form. + +The Parse and Format functions convert native types to/from +canonical strings. + +Note are no Parse or Format functions for URL or string. For URL use the +standard url.Parse() and url.URL.String(). The canonical string format of a +string is the string itself. +*/ +package types diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go new file mode 100644 index 00000000..ff049727 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go @@ -0,0 +1,75 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "time" +) + +// Timestamp wraps time.Time to normalize the time layout to RFC3339. It is +// intended to enforce compliance with the CloudEvents spec for their +// definition of Timestamp. Custom marshal methods are implemented to ensure +// the outbound Timestamp is a string in the RFC3339 layout. +type Timestamp struct { + time.Time +} + +// ParseTimestamp attempts to parse the given time assuming RFC3339 layout +func ParseTimestamp(s string) (*Timestamp, error) { + if s == "" { + return nil, nil + } + tt, err := ParseTime(s) + return &Timestamp{Time: tt}, err +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (t *Timestamp) MarshalJSON() ([]byte, error) { + if t == nil || t.IsZero() { + return []byte(`""`), nil + } + return []byte(fmt.Sprintf("%q", t)), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (t *Timestamp) UnmarshalJSON(b []byte) error { + var timestamp string + if err := json.Unmarshal(b, ×tamp); err != nil { + return err + } + var err error + t.Time, err = ParseTime(timestamp) + return err +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. +func (t *Timestamp) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if t == nil || t.IsZero() { + return e.EncodeElement(nil, start) + } + return e.EncodeElement(t.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (t *Timestamp) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var timestamp string + if err := d.DecodeElement(×tamp, &start); err != nil { + return err + } + var err error + t.Time, err = ParseTime(timestamp) + return err +} + +// String outputs the time using RFC3339 format. +func (t Timestamp) String() string { return FormatTime(t.Time) } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go new file mode 100644 index 00000000..bed60809 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go @@ -0,0 +1,86 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "net/url" +) + +// URI is a wrapper to url.URL. 
It is intended to enforce compliance with +// the CloudEvents spec for their definition of URI. Custom +// marshal methods are implemented to ensure the outbound URI object +// is a flat string. +type URI struct { + url.URL +} + +// ParseURI attempts to parse the given string as a URI. +func ParseURI(u string) *URI { + if u == "" { + return nil + } + pu, err := url.Parse(u) + if err != nil { + return nil + } + return &URI{URL: *pu} +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (u URI) MarshalJSON() ([]byte, error) { + b := fmt.Sprintf("%q", u.String()) + return []byte(b), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (u *URI) UnmarshalJSON(b []byte) error { + var ref string + if err := json.Unmarshal(b, &ref); err != nil { + return err + } + r := ParseURI(ref) + if r != nil { + *u = *r + } + return nil +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. +func (u URI) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(u.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (u *URI) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var ref string + if err := d.DecodeElement(&ref, &start); err != nil { + return err + } + r := ParseURI(ref) + if r != nil { + *u = *r + } + return nil +} + +func (u URI) Validate() bool { + return u.IsAbs() +} + +// String returns the full string representation of the URI-Reference. +func (u *URI) String() string { + if u == nil { + return "" + } + return u.URL.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go new file mode 100644 index 00000000..22fa1231 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go @@ -0,0 +1,82 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "net/url" +) + +// URIRef is a wrapper to url.URL. It is intended to enforce compliance with +// the CloudEvents spec for their definition of URI-Reference. Custom +// marshal methods are implemented to ensure the outbound URIRef object is +// is a flat string. +type URIRef struct { + url.URL +} + +// ParseURIRef attempts to parse the given string as a URI-Reference. +func ParseURIRef(u string) *URIRef { + if u == "" { + return nil + } + pu, err := url.Parse(u) + if err != nil { + return nil + } + return &URIRef{URL: *pu} +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (u URIRef) MarshalJSON() ([]byte, error) { + b := fmt.Sprintf("%q", u.String()) + return []byte(b), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (u *URIRef) UnmarshalJSON(b []byte) error { + var ref string + if err := json.Unmarshal(b, &ref); err != nil { + return err + } + r := ParseURIRef(ref) + if r != nil { + *u = *r + } + return nil +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. 
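+
+// NOTE: illustrative example added for this draft, not part of the upstream
+// SDK. A URIRef marshals to a flat JSON string and parses back unchanged:
+func exampleURIRefRoundTrip() {
+	ref := ParseURIRef("/source/subpath")
+	b, _ := json.Marshal(ref) // "\"/source/subpath\""
+	var out URIRef
+	_ = json.Unmarshal(b, &out)
+	fmt.Println(out.String() == ref.String()) // true
+}
+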
+func (u URIRef) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(u.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (u *URIRef) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var ref string + if err := d.DecodeElement(&ref, &start); err != nil { + return err + } + r := ParseURIRef(ref) + if r != nil { + *u = *r + } + return nil +} + +// String returns the full string representation of the URI-Reference. +func (u *URIRef) String() string { + if u == nil { + return "" + } + return u.URL.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/value.go b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go new file mode 100644 index 00000000..14004d3e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go @@ -0,0 +1,337 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/base64" + "fmt" + "math" + "net/url" + "reflect" + "strconv" + "time" +) + +// FormatBool returns canonical string format: "true" or "false" +func FormatBool(v bool) string { return strconv.FormatBool(v) } + +// FormatInteger returns canonical string format: decimal notation. +func FormatInteger(v int32) string { return strconv.Itoa(int(v)) } + +// FormatBinary returns canonical string format: standard base64 encoding +func FormatBinary(v []byte) string { return base64.StdEncoding.EncodeToString(v) } + +// FormatTime returns canonical string format: RFC3339 with nanoseconds +func FormatTime(v time.Time) string { return v.UTC().Format(time.RFC3339Nano) } + +// ParseBool parse canonical string format: "true" or "false" +func ParseBool(v string) (bool, error) { return strconv.ParseBool(v) } + +// ParseInteger parse canonical string format: decimal notation. +func ParseInteger(v string) (int32, error) { + // Accept floating-point but truncate to int32 as per CE spec. + f, err := strconv.ParseFloat(v, 64) + if err != nil { + return 0, err + } + if f > math.MaxInt32 || f < math.MinInt32 { + return 0, rangeErr(v) + } + return int32(f), nil +} + +// ParseBinary parse canonical string format: standard base64 encoding +func ParseBinary(v string) ([]byte, error) { return base64.StdEncoding.DecodeString(v) } + +// ParseTime parse canonical string format: RFC3339 with nanoseconds +func ParseTime(v string) (time.Time, error) { + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + err := convertErr(time.Time{}, v) + err.extra = ": not in RFC3339 format" + return time.Time{}, err + } + return t, nil +} + +// Format returns the canonical string format of v, where v can be +// any type that is convertible to a CloudEvents type. 
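+
+// NOTE: illustrative example added for this draft, not part of the upstream
+// SDK. It shows the canonical string encodings produced for convertible
+// values:
+func exampleCanonicalForms() {
+	s1, _ := Format(true)                  // "true"
+	s2, _ := Format(int64(42))             // "42" (validated into int32)
+	s3, _ := Format([]byte{0xde, 0xad})    // "3q0=" (base64)
+	s4, _ := Format(time.Unix(0, 0).UTC()) // "1970-01-01T00:00:00Z"
+	fmt.Println(s1, s2, s3, s4)
+}
+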
+func Format(v interface{}) (string, error) {
+	v, err := Validate(v)
+	if err != nil {
+		return "", err
+	}
+	switch v := v.(type) {
+	case bool:
+		return FormatBool(v), nil
+	case int32:
+		return FormatInteger(v), nil
+	case string:
+		return v, nil
+	case []byte:
+		return FormatBinary(v), nil
+	case URI:
+		return v.String(), nil
+	case URIRef:
+		// url.URL is often passed by pointer so allow both
+		return v.String(), nil
+	case Timestamp:
+		return FormatTime(v.Time), nil
+	default:
+		return "", fmt.Errorf("%T is not a CloudEvents type", v)
+	}
+}
+
+// Validate v is a valid CloudEvents attribute value, convert it to one of:
+// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp
+func Validate(v interface{}) (interface{}, error) {
+	switch v := v.(type) {
+	case bool, int32, string, []byte:
+		return v, nil // Already a CloudEvents type, no validation needed.
+
+	case uint, uintptr, uint8, uint16, uint32, uint64:
+		u := reflect.ValueOf(v).Uint()
+		if u > math.MaxInt32 {
+			return nil, rangeErr(v)
+		}
+		return int32(u), nil
+	case int, int8, int16, int64:
+		i := reflect.ValueOf(v).Int()
+		if i > math.MaxInt32 || i < math.MinInt32 {
+			return nil, rangeErr(v)
+		}
+		return int32(i), nil
+	case float32, float64:
+		f := reflect.ValueOf(v).Float()
+		if f > math.MaxInt32 || f < math.MinInt32 {
+			return nil, rangeErr(v)
+		}
+		return int32(f), nil
+
+	case *url.URL:
+		if v == nil {
+			break
+		}
+		return URI{URL: *v}, nil
+	case url.URL:
+		return URI{URL: v}, nil
+	case *URIRef:
+		if v != nil {
+			return *v, nil
+		}
+		return nil, nil
+	case URIRef:
+		return v, nil
+	case *URI:
+		if v != nil {
+			return *v, nil
+		}
+		return nil, nil
+	case URI:
+		return v, nil
+	case time.Time:
+		return Timestamp{Time: v}, nil
+	case *time.Time:
+		if v == nil {
+			break
+		}
+		return Timestamp{Time: *v}, nil
+	case Timestamp:
+		return v, nil
+	}
+	rx := reflect.ValueOf(v)
+	if rx.Kind() == reflect.Ptr && !rx.IsNil() {
+		// Allow pointers-to convertible types
+		return Validate(rx.Elem().Interface())
+	}
+	return nil, fmt.Errorf("invalid CloudEvents value: %#v", v)
+}
+
+// Clone v clones a CloudEvents attribute value, which is one of the valid types:
+//
+//	bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp
+//
+// Returns the same type
+// Panics if the type is not valid
+func Clone(v interface{}) interface{} {
+	if v == nil {
+		return nil
+	}
+	switch v := v.(type) {
+	case bool, int32, string, nil:
+		return v // Already a CloudEvents type, no validation needed.
+	case []byte:
+		clone := make([]byte, len(v))
+		copy(clone, v)
+		return clone // return the copy, not the original slice
+	case url.URL:
+		return URI{v}
+	case *url.URL:
+		return &URI{*v}
+	case URIRef:
+		return v
+	case *URIRef:
+		return &URIRef{v.URL}
+	case URI:
+		return v
+	case *URI:
+		return &URI{v.URL}
+	case time.Time:
+		return Timestamp{v}
+	case *time.Time:
+		return &Timestamp{*v}
+	case Timestamp:
+		return v
+	case *Timestamp:
+		return &Timestamp{v.Time}
+	}
+	panic(fmt.Errorf("invalid CloudEvents value: %#v", v))
+}
+
+// ToBool accepts a bool value or canonical "true"/"false" string.
+func ToBool(v interface{}) (bool, error) {
+	v, err := Validate(v)
+	if err != nil {
+		return false, err
+	}
+	switch v := v.(type) {
+	case bool:
+		return v, nil
+	case string:
+		return ParseBool(v)
+	default:
+		return false, convertErr(true, v)
+	}
+}
+
+// ToInteger accepts any numeric value in int32 range, or canonical string.
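+
+// NOTE: illustrative example added for this draft, not part of the upstream
+// SDK. The To helpers accept either the native type or its canonical string
+// form:
+func exampleToHelpers() {
+	b, _ := ToBool("true")     // true
+	i, _ := ToInteger("10")    // int32(10)
+	raw, _ := ToBinary("3q0=") // []byte{0xde, 0xad}
+	fmt.Println(b, i, len(raw))
+}
+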
+func ToInteger(v interface{}) (int32, error) { + v, err := Validate(v) + if err != nil { + return 0, err + } + switch v := v.(type) { + case int32: + return v, nil + case string: + return ParseInteger(v) + default: + return 0, convertErr(int32(0), v) + } +} + +// ToString returns a string value unaltered. +// +// This function does not perform canonical string encoding, use one of the +// Format functions for that. +func ToString(v interface{}) (string, error) { + v, err := Validate(v) + if err != nil { + return "", err + } + switch v := v.(type) { + case string: + return v, nil + default: + return "", convertErr("", v) + } +} + +// ToBinary returns a []byte value, decoding from base64 string if necessary. +func ToBinary(v interface{}) ([]byte, error) { + v, err := Validate(v) + if err != nil { + return nil, err + } + switch v := v.(type) { + case []byte: + return v, nil + case string: + return base64.StdEncoding.DecodeString(v) + default: + return nil, convertErr([]byte(nil), v) + } +} + +// ToURL returns a *url.URL value, parsing from string if necessary. +func ToURL(v interface{}) (*url.URL, error) { + v, err := Validate(v) + if err != nil { + return nil, err + } + switch v := v.(type) { + case *URI: + return &v.URL, nil + case URI: + return &v.URL, nil + case *URIRef: + return &v.URL, nil + case URIRef: + return &v.URL, nil + case string: + u, err := url.Parse(v) + if err != nil { + return nil, err + } + return u, nil + default: + return nil, convertErr((*url.URL)(nil), v) + } +} + +// ToTime returns a time.Time value, parsing from RFC3339 string if necessary. +func ToTime(v interface{}) (time.Time, error) { + v, err := Validate(v) + if err != nil { + return time.Time{}, err + } + switch v := v.(type) { + case Timestamp: + return v.Time, nil + case string: + ts, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return time.Time{}, err + } + return ts, nil + default: + return time.Time{}, convertErr(time.Time{}, v) + } +} + +func IsZero(v interface{}) bool { + // Fast path + if v == nil { + return true + } + if s, ok := v.(string); ok && s == "" { + return true + } + return reflect.ValueOf(v).IsZero() +} + +type ConvertErr struct { + // Value being converted + Value interface{} + // Type of attempted conversion + Type reflect.Type + + extra string +} + +func (e *ConvertErr) Error() string { + return fmt.Sprintf("cannot convert %#v to %s%s", e.Value, e.Type, e.extra) +} + +func convertErr(target, v interface{}) *ConvertErr { + return &ConvertErr{Value: v, Type: reflect.TypeOf(target)} +} + +func rangeErr(v interface{}) error { + e := convertErr(int32(0), v) + e.extra = ": out of range" + return e +} diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 00000000..87d55747 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,304 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------
+
+Files: s2/cmd/internal/filepathx/*
+
+Copyright 2016 The filepathx Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
new file mode 100644
index 00000000..de912e18
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -0,0 +1,1017 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	NoCompression      = 0
+	BestSpeed          = 1
+	BestCompression    = 9
+	DefaultCompression = -1
+
+	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
+	// entropy encoding. This mode is useful in compressing data that has
+	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
+	// that lacks an entropy encoder. Compression gains are achieved when
+	// certain bytes in the input stream occur more frequently than others.
+	//
+	// Note that HuffmanOnly produces a compressed output that is
+	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
+	// continue to be able to decompress this output.
+	HuffmanOnly         = -2
+	ConstantCompression = HuffmanOnly // compatibility alias.
+
+	logWindowSize    = 15
+	windowSize       = 1 << logWindowSize
+	windowMask       = windowSize - 1
+	logMaxOffsetSize = 15  // Standard DEFLATE
+	minMatchLength   = 4   // The smallest match that the compressor looks for
+	maxMatchLength   = 258 // The longest match for the compressor
+	minOffsetSize    = 1   // The shortest offset that makes any sense
+
+	// The maximum number of tokens we will encode at a time.
+	// Smaller sizes usually create less optimal blocks.
+	// Bigger can make context switching slow.
+	// We use this for levels 7-9, so we make it big.
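+	// (Illustrative note: at this size, deflateLazy flushes a block via
+	// writeBlock after at most 32768 emitted literal/match tokens.)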
+ maxFlateBlockTokens = 1 << 15 + maxStoreBlockSize = 65535 + hashBits = 17 // After 17 performance degrades + hashSize = 1 << hashBits + hashMask = (1 << hashBits) - 1 + hashShift = (hashBits + minMatchLength - 1) / minMatchLength + maxHashOffset = 1 << 28 + + skipNever = math.MaxInt32 + + debugDeflate = false +) + +type compressionLevel struct { + good, lazy, nice, chain, fastSkipHashing, level int +} + +// Compression levels have been rebalanced from zlib deflate defaults +// to give a bigger spread in speed and compression. +// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ +var levels = []compressionLevel{ + {}, // 0 + // Level 1-6 uses specialized algorithm - values not used + {0, 0, 0, 0, 0, 1}, + {0, 0, 0, 0, 0, 2}, + {0, 0, 0, 0, 0, 3}, + {0, 0, 0, 0, 0, 4}, + {0, 0, 0, 0, 0, 5}, + {0, 0, 0, 0, 0, 6}, + // Levels 7-9 use increasingly more lazy matching + // and increasingly stringent conditions for "good enough". + {8, 12, 16, 24, skipNever, 7}, + {16, 30, 40, 64, skipNever, 8}, + {32, 258, 258, 1024, skipNever, 9}, +} + +// advancedState contains state for the advanced levels, with bigger hash tables, etc. +type advancedState struct { + // deflate state + length int + offset int + maxInsertIndex int + chainHead int + hashOffset int + + ii uint16 // position of last match, intended to overflow to reset. + + // input window: unprocessed data is window[index:windowEnd] + index int + hashMatch [maxMatchLength + minMatchLength]uint32 + + // Input hash chains + // hashHead[hashValue] contains the largest inputIndex with the specified hash value + // If hashHead[hashValue] is within the current window, then + // hashPrev[hashHead[hashValue] & windowMask] contains the previous index + // with the same hash value. + hashHead [hashSize]uint32 + hashPrev [windowSize]uint32 +} + +type compressor struct { + compressionLevel + + h *huffmanEncoder + w *huffmanBitWriter + + // compression algorithm + fill func(*compressor, []byte) int // copy data to window + step func(*compressor) // process window + + window []byte + windowEnd int + blockStart int // window index where current tokens start + err error + + // queued output tokens + tokens tokens + fast fastEnc + state *advancedState + + sync bool // requesting flush + byteAvailable bool // if true, still need to process window[index-1]. +} + +func (d *compressor) fillDeflate(b []byte) int { + s := d.state + if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { + // shift the window by windowSize + //copy(d.window[:], d.window[windowSize:2*windowSize]) + *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) + s.index -= windowSize + d.windowEnd -= windowSize + if d.blockStart >= windowSize { + d.blockStart -= windowSize + } else { + d.blockStart = math.MaxInt32 + } + s.hashOffset += windowSize + if s.hashOffset > maxHashOffset { + delta := s.hashOffset - 1 + s.hashOffset -= delta + s.chainHead -= delta + // Iterate over slices instead of arrays to avoid copying + // the entire table onto the stack (Issue #18625). 
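+			// Entries at or below delta predate the shifted window and are
+			// cleared to 0 (the "unset" marker); larger entries are rebased
+			// so that stored index + hashOffset still addresses the same
+			// window byte after the shift.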
+			for i, v := range s.hashPrev[:] {
+				if int(v) > delta {
+					s.hashPrev[i] = uint32(int(v) - delta)
+				} else {
+					s.hashPrev[i] = 0
+				}
+			}
+			for i, v := range s.hashHead[:] {
+				if int(v) > delta {
+					s.hashHead[i] = uint32(int(v) - delta)
+				} else {
+					s.hashHead[i] = 0
+				}
+			}
+		}
+	}
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
+	if index > 0 || eof {
+		var window []byte
+		if d.blockStart <= index {
+			window = d.window[d.blockStart:index]
+		}
+		d.blockStart = index
+		//d.w.writeBlock(tok, eof, window)
+		d.w.writeBlockDynamic(tok, eof, window, d.sync)
+		return d.w.err
+	}
+	return nil
+}
+
+// writeBlockSkip writes the current block and uses the number of tokens
+// to determine if the block should be stored (when there are no matches),
+// or only huffman encoded.
+func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
+	if index > 0 || eof {
+		if d.blockStart <= index {
+			window := d.window[d.blockStart:index]
+			// If we removed less than a 64th of all literals
+			// we huffman compress the block.
+			if int(tok.n) > len(window)-int(tok.n>>6) {
+				d.w.writeBlockHuff(eof, window, d.sync)
+			} else {
+				// Write a dynamic huffman block.
+				d.w.writeBlockDynamic(tok, eof, window, d.sync)
+			}
+		} else {
+			d.w.writeBlock(tok, eof, nil)
+		}
+		d.blockStart = index
+		return d.w.err
+	}
+	return nil
+}
+
+// fillWindow will fill the current window with the supplied
+// dictionary and calculate all hashes.
+// This is much faster than doing a full encode.
+// Should only be used after a start/reset.
+func (d *compressor) fillWindow(b []byte) {
+	// Do not fill window if we are in store-only or huffman mode.
+	if d.level <= 0 {
+		return
+	}
+	if d.fast != nil {
+		// encode the last data, but discard the result
+		if len(b) > maxMatchOffset {
+			b = b[len(b)-maxMatchOffset:]
+		}
+		d.fast.Encode(&d.tokens, b)
+		d.tokens.Reset()
+		return
+	}
+	s := d.state
+	// If we are given too much, cut it.
+	if len(b) > windowSize {
+		b = b[len(b)-windowSize:]
+	}
+	// Add all to window.
+	n := copy(d.window[d.windowEnd:], b)
+
+	// Calculate 256 hashes at a time (more L1 cache hits).
+	loops := (n + 256 - minMatchLength) / 256
+	for j := 0; j < loops; j++ {
+		startindex := j * 256
+		end := startindex + 256 + minMatchLength - 1
+		if end > n {
+			end = n
+		}
+		tocheck := d.window[startindex:end]
+		dstSize := len(tocheck) - minMatchLength + 1
+
+		if dstSize <= 0 {
+			continue
+		}
+
+		dst := s.hashMatch[:dstSize]
+		bulkHash4(tocheck, dst)
+		var newH uint32
+		for i, val := range dst {
+			di := i + startindex
+			newH = val & hashMask
+			// Get previous value with the same hash.
+			// Our chain should point to the previous value.
+			s.hashPrev[di&windowMask] = s.hashHead[newH]
+			// Set the head of the hash chain to us.
+			s.hashHead[newH] = uint32(di + s.hashOffset)
+		}
+	}
+	// Update window information.
+	d.windowEnd += n
+	s.index = n
+}
+
+// findMatch tries to find a match starting at pos whose length is greater
+// than prevLength. We only look at d.chain possibilities before giving up.
+// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
+func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
+	minMatchLook := maxMatchLength
+	if lookahead < minMatchLook {
+		minMatchLook = lookahead
+	}
+
+	win := d.window[0 : pos+minMatchLook]
+
+	// We quit when we get a match that's at least nice long
+	nice := len(win) - pos
+	if d.nice < nice {
+		nice = d.nice
+	}
+
+	// tries limits how far down the hash chain we search.
+	tries := d.chain
+	length = minMatchLength - 1
+
+	wEnd := win[pos+length]
+	wPos := win[pos:]
+	minIndex := pos - windowSize
+	if minIndex < 0 {
+		minIndex = 0
+	}
+	offset = 0
+
+	if d.chain < 100 {
+		for i := prevHead; tries > 0; tries-- {
+			if wEnd == win[i+length] {
+				n := matchLen(win[i:i+minMatchLook], wPos)
+				if n > length {
+					length = n
+					offset = pos - i
+					ok = true
+					if n >= nice {
+						// The match is good enough that we don't try to find a better one.
+						break
+					}
+					wEnd = win[pos+n]
+				}
+			}
+			if i <= minIndex {
+				// hashPrev[i & windowMask] has already been overwritten, so stop now.
+				break
+			}
+			i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+			if i < minIndex {
+				break
+			}
+		}
+		return
+	}
+
+	// Minimum gain to accept a match.
+	cGain := 4
+
+	// Some like it higher (CSV), some like it lower (JSON)
+	const baseCost = 3
+	// Every match is charged baseCost bits plus its extra bits;
+	// a match must gain more than this to be accepted.
+
+	for i := prevHead; tries > 0; tries-- {
+		if wEnd == win[i+length] {
+			n := matchLen(win[i:i+minMatchLook], wPos)
+			if n > length {
+				// Calculate gain (estimate).
+				newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
+
+				//fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length)
+				if newGain > cGain {
+					length = n
+					offset = pos - i
+					cGain = newGain
+					ok = true
+					if n >= nice {
+						// The match is good enough that we don't try to find a better one.
+						break
+					}
+					wEnd = win[pos+n]
+				}
+			}
+		}
+		if i <= minIndex {
+			// hashPrev[i & windowMask] has already been overwritten, so stop now.
+			break
+		}
+		i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+		if i < minIndex {
+			break
+		}
+	}
+	return
+}
+
+func (d *compressor) writeStoredBlock(buf []byte) error {
+	if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
+		return d.w.err
+	}
+	d.w.writeBytes(buf)
+	return d.w.err
+}
+
+// hash4 returns a hash representation of the first 4 bytes
+// of the supplied slice.
+// The caller must ensure that len(b) >= 4.
+func hash4(b []byte) uint32 {
+	return hash4u(binary.LittleEndian.Uint32(b), hashBits)
+}
+
+// hash4u returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
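+//
+// For example (illustrative), hash4 above is equivalent to:
+//
+//	hash4u(binary.LittleEndian.Uint32(b), hashBits)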
+func hash4u(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> (32 - h) +} + +// bulkHash4 will compute hashes using the same +// algorithm as hash4 +func bulkHash4(b []byte, dst []uint32) { + if len(b) < 4 { + return + } + hb := binary.LittleEndian.Uint32(b) + + dst[0] = hash4u(hb, hashBits) + end := len(b) - 4 + 1 + for i := 1; i < end; i++ { + hb = (hb >> 8) | uint32(b[i+3])<<24 + dst[i] = hash4u(hb, hashBits) + } +} + +func (d *compressor) initDeflate() { + d.window = make([]byte, 2*windowSize) + d.byteAvailable = false + d.err = nil + if d.state == nil { + return + } + s := d.state + s.index = 0 + s.hashOffset = 1 + s.length = minMatchLength - 1 + s.offset = 0 + s.chainHead = -1 +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazy() { + s := d.state + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = debugDeflate + + if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { + return + } + if d.windowEnd != s.index && d.chain > 100 { + // Get literal huffman coder. + if d.h == nil { + d.h = newHuffmanEncoder(maxFlateBlockTokens) + } + var tmp [256]uint16 + for _, v := range d.window[s.index:d.windowEnd] { + tmp[v]++ + } + d.h.generate(tmp[:], 15) + } + + s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + + for { + if sanity && s.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - s.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && s.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.AddLiteral(d.window[s.index-1]) + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + return + } + } + if s.index < s.maxInsertIndex { + // Update the hash + hash := hash4(d.window[s.index:]) + ch := s.hashHead[hash] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[hash] = uint32(s.index + s.hashOffset) + } + prevLength := s.length + prevOffset := s.offset + s.length = minMatchLength - 1 + s.offset = 0 + minIndex := s.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { + s.length = newLength + s.offset = newOffset + } + } + + if prevLength >= minMatchLength && s.length <= prevLength { + // No better match, but check for better match at end... + // + // Skip forward a number of bytes. + // Offset of 2 seems to yield best results. 3 is sometimes better. + const checkOff = 2 + + // Check all, except full length + if prevLength < maxMatchLength-checkOff { + prevIndex := s.index - 1 + if prevIndex+prevLength < s.maxInsertIndex { + end := lookahead + if lookahead > maxMatchLength+checkOff { + end = maxMatchLength + checkOff + } + end += prevIndex + + // Hash at match end. 
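+					// (Sketch of the idea: probe the hash chain at the point
+					// where the previous match ends; a candidate found there
+					// may yield a longer match starting checkOff bytes in.)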
+ h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength { + prevLength = length + prevOffset = prevIndex - ch2 + + // Extend back... + for i := checkOff - 1; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } else if false { + // Check one further ahead. + // Only rarely better, disabled for now. + prevIndex++ + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength+checkOff { + prevLength = length + prevOffset = prevIndex - ch2 + prevIndex-- + + // Extend back... + for i := checkOff; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } + } + } + } + } + } + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + newIndex := s.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > s.maxInsertIndex { + end = s.maxInsertIndex + } + end += minMatchLength - 1 + startindex := s.index + 1 + if startindex > s.maxInsertIndex { + startindex = s.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := s.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + s.hashPrev[di&windowMask] = s.hashHead[newH] + // Set the head of the hash chain to us. 
+					s.hashHead[newH] = uint32(di + s.hashOffset)
+				}
+			}
+
+			s.index = newIndex
+			d.byteAvailable = false
+			s.length = minMatchLength - 1
+			if d.tokens.n == maxFlateBlockTokens {
+				// The block includes the current character
+				if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+					return
+				}
+				d.tokens.Reset()
+			}
+			s.ii = 0
+		} else {
+			// Reset, if we got a match this run.
+			if s.length >= minMatchLength {
+				s.ii = 0
+			}
+			// We have a byte waiting. Emit it.
+			if d.byteAvailable {
+				s.ii++
+				d.tokens.AddLiteral(d.window[s.index-1])
+				if d.tokens.n == maxFlateBlockTokens {
+					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+						return
+					}
+					d.tokens.Reset()
+				}
+				s.index++
+
+				// If we have a long run of no matches, skip additional bytes
+				// Resets when s.ii overflows after 64KB.
+				if n := int(s.ii) - d.chain; n > 0 {
+					n = 1 + int(n>>6)
+					for j := 0; j < n; j++ {
+						if s.index >= d.windowEnd-1 {
+							break
+						}
+						d.tokens.AddLiteral(d.window[s.index-1])
+						if d.tokens.n == maxFlateBlockTokens {
+							if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+								return
+							}
+							d.tokens.Reset()
+						}
+						// Index...
+						if s.index < s.maxInsertIndex {
+							h := hash4(d.window[s.index:])
+							ch := s.hashHead[h]
+							s.chainHead = int(ch)
+							s.hashPrev[s.index&windowMask] = ch
+							s.hashHead[h] = uint32(s.index + s.hashOffset)
+						}
+						s.index++
+					}
+					// Flush last byte
+					d.tokens.AddLiteral(d.window[s.index-1])
+					d.byteAvailable = false
+					// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
+					if d.tokens.n == maxFlateBlockTokens {
+						if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+							return
+						}
+						d.tokens.Reset()
+					}
+				}
+			} else {
+				s.index++
+				d.byteAvailable = true
+			}
+		}
+	}
+}
+
+func (d *compressor) store() {
+	if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+		d.windowEnd = 0
+	}
+}
+
+// fillBlock will fill the buffer with data for huffman-only compression.
+// The number of bytes copied is returned.
+func (d *compressor) fillBlock(b []byte) int {
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+		return
+	}
+	d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+	d.err = d.w.err
+	d.windowEnd = 0
+}
+
+// storeFast will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeFast() {
+	// We only compress if we have maxStoreBlockSize.
+	if d.windowEnd < len(d.window) {
+		if !d.sync {
+			return
+		}
+		// Handle extremely small sizes.
+		if d.windowEnd < 128 {
+			if d.windowEnd == 0 {
+				return
+			}
+			if d.windowEnd <= 32 {
+				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+			} else {
+				d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
+				d.err = d.w.err
+			}
+			d.tokens.Reset()
+			d.windowEnd = 0
+			d.fast.Reset()
+			return
+		}
+	}
+
+	d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
+	// If we made zero matches, store the block as is.
+	if d.tokens.n == 0 {
+		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+		// If we removed less than 1/16th, huffman compress the block.
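+		// (tokens.n counts emitted tokens; if matching removed fewer than
+		// 1/16th of the input bytes, a Huffman-only block is written below.)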
+ } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + } else { + d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + } + d.tokens.Reset() + d.windowEnd = 0 +} + +// write will add input byte to the stream. +// Unless an error occurs all bytes will be consumed. +func (d *compressor) write(b []byte) (n int, err error) { + if d.err != nil { + return 0, d.err + } + n = len(b) + for len(b) > 0 { + if d.windowEnd == len(d.window) || d.sync { + d.step(d) + } + b = b[d.fill(d, b):] + if d.err != nil { + return 0, d.err + } + } + return n, d.err +} + +func (d *compressor) syncFlush() error { + d.sync = true + if d.err != nil { + return d.err + } + d.step(d) + if d.err == nil { + d.w.writeStoredHeader(0, false) + d.w.flush() + d.err = d.w.err + } + d.sync = false + return d.err +} + +func (d *compressor) init(w io.Writer, level int) (err error) { + d.w = newHuffmanBitWriter(w) + + switch { + case level == NoCompression: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).store + case level == ConstantCompression: + d.w.logNewTablePenalty = 10 + d.window = make([]byte, 32<<10) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeHuff + case level == DefaultCompression: + level = 5 + fallthrough + case level >= 1 && level <= 6: + d.w.logNewTablePenalty = 7 + d.fast = newFastEnc(level) + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast + case 7 <= level && level <= 9: + d.w.logNewTablePenalty = 8 + d.state = &advancedState{} + d.compressionLevel = levels[level] + d.initDeflate() + d.fill = (*compressor).fillDeflate + d.step = (*compressor).deflateLazy + case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize: + d.w.logNewTablePenalty = 7 + d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize} + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast + default: + return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) + } + d.level = level + return nil +} + +// reset the state of the compressor. +func (d *compressor) reset(w io.Writer) { + d.w.reset(w) + d.sync = false + d.err = nil + // We only need to reset a few things for Snappy. + if d.fast != nil { + d.fast.Reset() + d.windowEnd = 0 + d.tokens.Reset() + return + } + switch d.compressionLevel.chain { + case 0: + // level was NoCompression or ConstantCompresssion. + d.windowEnd = 0 + default: + s := d.state + s.chainHead = -1 + for i := range s.hashHead { + s.hashHead[i] = 0 + } + for i := range s.hashPrev { + s.hashPrev[i] = 0 + } + s.hashOffset = 1 + s.index, d.windowEnd = 0, 0 + d.blockStart, d.byteAvailable = 0, false + d.tokens.Reset() + s.length = minMatchLength - 1 + s.offset = 0 + s.ii = 0 + s.maxInsertIndex = 0 + } +} + +func (d *compressor) close() error { + if d.err != nil { + return d.err + } + d.sync = true + d.step(d) + if d.err != nil { + return d.err + } + if d.w.writeStoredHeader(0, true); d.w.err != nil { + return d.w.err + } + d.w.flush() + d.w.reset(nil) + return d.w.err +} + +// NewWriter returns a new Writer compressing data at the given level. +// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); +// higher levels typically run slower but compress more. 
+// Level 0 (NoCompression) does not attempt any compression; it only adds the +// necessary DEFLATE framing. +// Level -1 (DefaultCompression) uses the default compression level. +// Level -2 (ConstantCompression) will use Huffman compression only, giving +// a very fast compression for all types of input, but sacrificing considerable +// compression efficiency. +// +// If level is in the range [-2, 9] then the error returned will be nil. +// Otherwise the error returned will be non-nil. +func NewWriter(w io.Writer, level int) (*Writer, error) { + var dw Writer + if err := dw.d.init(w, level); err != nil { + return nil, err + } + return &dw, nil +} + +// NewWriterDict is like NewWriter but initializes the new +// Writer with a preset dictionary. The returned Writer behaves +// as if the dictionary had been written to it without producing +// any compressed output. The compressed data written to w +// can only be decompressed by a Reader initialized with the +// same dictionary. +func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { + zw, err := NewWriter(w, level) + if err != nil { + return nil, err + } + zw.d.fillWindow(dict) + zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. + return zw, err +} + +// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow. +const MinCustomWindowSize = 32 + +// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow. +const MaxCustomWindowSize = windowSize + +// NewWriterWindow returns a new Writer compressing data with a custom window size. +// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize. +func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) { + if windowSize < MinCustomWindowSize { + return nil, errors.New("flate: requested window size less than MinWindowSize") + } + if windowSize > MaxCustomWindowSize { + return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize") + } + var dw Writer + if err := dw.d.init(w, -windowSize); err != nil { + return nil, err + } + return &dw, nil +} + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + d compressor + dict []byte +} + +// Write writes data to w, which will eventually write the +// compressed form of data to its underlying writer. +func (w *Writer) Write(data []byte) (n int, err error) { + return w.d.write(data) +} + +// Flush flushes any pending data to the underlying writer. +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. +// Flush does not return until the data has been written. +// Calling Flush when there is no pending data still causes the Writer +// to emit a sync marker of at least 4 bytes. +// If the underlying writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (w *Writer) Flush() error { + // For more about flushing: + // http://www.bolet.org/~pornin/deflate-flush.html + return w.d.syncFlush() +} + +// Close flushes and closes the writer. +func (w *Writer) Close() error { + return w.d.close() +} + +// Reset discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level and dictionary. 
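+//
+// For example (sketch), w.Reset(&buf) makes w write a fresh DEFLATE stream
+// to buf, re-applying the preset dictionary if w came from NewWriterDict.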
+func (w *Writer) Reset(dst io.Writer) { + if len(w.dict) > 0 { + // w was created with NewWriterDict + w.d.reset(dst) + if dst != nil { + w.d.fillWindow(w.dict) + } + } else { + // w was created with NewWriter + w.d.reset(dst) + } +} + +// ResetDict discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level, but sets a specific dictionary. +func (w *Writer) ResetDict(dst io.Writer, dict []byte) { + w.dict = dict + w.d.reset(dst) + w.d.fillWindow(w.dict) +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go new file mode 100644 index 00000000..bb36351a --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// dictDecoder implements the LZ77 sliding dictionary as used in decompression. +// LZ77 decompresses data through sequences of two forms of commands: +// +// - Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. +// +// - Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. +// +// For performance reasons, this implementation performs little to no sanity +// checks about the arguments. As such, the invariants documented for each +// method call must be respected. +type dictDecoder struct { + hist []byte // Sliding window history + + // Invariant: 0 <= rdPos <= wrPos <= len(hist) + wrPos int // Current output position in buffer + rdPos int // Have emitted hist[:rdPos] already + full bool // Has a full window length been written yet? +} + +// init initializes dictDecoder to have a sliding window dictionary of the given +// size. If a preset dict is provided, it will initialize the dictionary with +// the contents of dict. +func (dd *dictDecoder) init(size int, dict []byte) { + *dd = dictDecoder{hist: dd.hist} + + if cap(dd.hist) < size { + dd.hist = make([]byte, size) + } + dd.hist = dd.hist[:size] + + if len(dict) > len(dd.hist) { + dict = dict[len(dict)-len(dd.hist):] + } + dd.wrPos = copy(dd.hist, dict) + if dd.wrPos == len(dd.hist) { + dd.wrPos = 0 + dd.full = true + } + dd.rdPos = dd.wrPos +} + +// histSize reports the total amount of historical data in the dictionary. +func (dd *dictDecoder) histSize() int { + if dd.full { + return len(dd.hist) + } + return dd.wrPos +} + +// availRead reports the number of bytes that can be flushed by readFlush. +func (dd *dictDecoder) availRead() int { + return dd.wrPos - dd.rdPos +} + +// availWrite reports the available amount of output buffer space. 
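+// That is, len(dd.hist)-dd.wrPos bytes may still be written before the
+// window must be drained with readFlush.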
+func (dd *dictDecoder) availWrite() int { + return len(dd.hist) - dd.wrPos +} + +// writeSlice returns a slice of the available buffer to write data to. +// +// This invariant will be kept: len(s) <= availWrite() +func (dd *dictDecoder) writeSlice() []byte { + return dd.hist[dd.wrPos:] +} + +// writeMark advances the writer pointer by cnt. +// +// This invariant must be kept: 0 <= cnt <= availWrite() +func (dd *dictDecoder) writeMark(cnt int) { + dd.wrPos += cnt +} + +// writeByte writes a single byte to the dictionary. +// +// This invariant must be kept: 0 < availWrite() +func (dd *dictDecoder) writeByte(c byte) { + dd.hist[dd.wrPos] = c + dd.wrPos++ +} + +// writeCopy copies a string at a given (dist, length) to the output. +// This returns the number of bytes copied and may be less than the requested +// length if the available space in the output buffer is too small. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) writeCopy(dist, length int) int { + dstBase := dd.wrPos + dstPos := dstBase + srcPos := dstPos - dist + endPos := dstPos + length + if endPos > len(dd.hist) { + endPos = len(dd.hist) + } + + // Copy non-overlapping section after destination position. + // + // This section is non-overlapping in that the copy length for this section + // is always less than or equal to the backwards distance. This can occur + // if a distance refers to data that wraps-around in the buffer. + // Thus, a backwards copy is performed here; that is, the exact bytes in + // the source prior to the copy is placed in the destination. + if srcPos < 0 { + srcPos += len(dd.hist) + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) + srcPos = 0 + } + + // Copy possibly overlapping section before destination position. + // + // This section can overlap if the copy length for this section is larger + // than the backwards distance. This is allowed by LZ77 so that repeated + // strings can be succinctly represented using (dist, length) pairs. + // Thus, a forwards copy is performed here; that is, the bytes copied is + // possibly dependent on the resulting bytes in the destination as the copy + // progresses along. This is functionally equivalent to the following: + // + // for i := 0; i < endPos-dstPos; i++ { + // dd.hist[dstPos+i] = dd.hist[srcPos+i] + // } + // dstPos = endPos + // + for dstPos < endPos { + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// tryWriteCopy tries to copy a string at a given (distance, length) to the +// output. This specialized version is optimized for short distances. +// +// This method is designed to be inlined for performance reasons. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) tryWriteCopy(dist, length int) int { + dstPos := dd.wrPos + endPos := dstPos + length + if dstPos < dist || endPos > len(dd.hist) { + return 0 + } + dstBase := dstPos + srcPos := dstPos - dist + + // Copy possibly overlapping section before destination position. +loop: + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + if dstPos < endPos { + goto loop // Avoid for-loop so that this function can be inlined + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// readFlush returns a slice of the historical buffer that is ready to be +// emitted to the user. The data returned by readFlush must be fully consumed +// before calling any other dictDecoder methods. 
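+//
+// Typical use (sketch; "out" is a hypothetical output buffer):
+//
+//	if dd.availWrite() == 0 {
+//		out = append(out, dd.readFlush()...)
+//	}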
+func (dd *dictDecoder) readFlush() []byte { + toRead := dd.hist[dd.rdPos:dd.wrPos] + dd.rdPos = dd.wrPos + if dd.wrPos == len(dd.hist) { + dd.wrPos, dd.rdPos = 0, 0 + dd.full = true + } + return toRead +} diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go new file mode 100644 index 00000000..c8124b5c --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -0,0 +1,193 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "fmt" +) + +type fastEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newFastEnc(level int) fastEnc { + switch level { + case 1: + return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} + case 2: + return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} + case 3: + return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} + case 4: + return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} + case 5: + return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} + case 6: + return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. + baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset + + bTableBits = 17 // Bits used in the big tables + bTableSize = 1 << bTableBits // Size of the table + allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history. + bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. +) + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +type tableEntry struct { + offset int32 +} + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastGen struct { + hist []byte + cur int32 +} + +func (e *fastGen) addBlock(src []byte) int32 { + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < maxMatchOffset*2 { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +type tableEntryPrev struct { + Cur tableEntry + Prev tableEntry +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. 
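+// Only the low 56 bits (7 bytes) of u affect the result; the top byte is
+// shifted out before the multiply.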
+func hash7(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
+}
+
+// hashLen returns a hash of the lowest mls bytes of u, with length output bits.
+// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be a constant for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
+	switch mls {
+	case 3:
+		return (uint32(u<<8) * prime3bytes) >> (32 - length)
+	case 5:
+		return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+	case 6:
+		return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+	case 7:
+		return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+	case 8:
+		return uint32((u * prime8bytes) >> (64 - length))
+	default:
+		return (uint32(u) * prime4bytes) >> (32 - length)
+	}
+}
+
+// matchlen will return the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >=0 and s < len(src).
+func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
+	if debugDecode {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > maxMatchOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+		}
+	}
+	s1 := int(s) + maxMatchLength - 4
+	if s1 > len(src) {
+		s1 = len(src)
+	}
+
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// matchlenLong will return the match length between offsets s and t in src.
+// It is assumed that s > t, that t >=0 and s < len(src).
+func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
+	if debugDeflate {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > maxMatchOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+		}
+	}
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset resets the encoding table.
+func (e *fastGen) Reset() {
+	if cap(e.hist) < allocHistory {
+		e.hist = make([]byte, 0, allocHistory)
+	}
+	// We offset current position so everything will be out of reach.
+	// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+	if e.cur <= bufferReset {
+		e.cur += maxMatchOffset + int32(len(e.hist))
+	}
+	e.hist = e.hist[:0]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
new file mode 100644
index 00000000..f70594c3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -0,0 +1,1182 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	// The largest offset code.
+	offsetCodeCount = 30
+
+	// The special code used to mark the end of a block.
+	endBlockMarker = 256
+
+	// The first length code.
+	lengthCodesStart = 257
+
+	// The number of codegen codes.
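+	// Per RFC 1951 section 3.2.7 there are 19 code length codes (0-18).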
+ codegenCodeCount = 19 + badCode = 255 + + // maxPredefinedTokens is the maximum number of tokens + // where we check if fixed size is smaller. + maxPredefinedTokens = 250 + + // bufferFlushSize indicates the buffer size + // after which bytes are flushed to the writer. + // Should preferably be a multiple of 6, since + // we accumulate 6 bytes between writes to the buffer. + bufferFlushSize = 246 +) + +// Minimum length code that emits bits. +const lengthExtraBitsMinCode = 8 + +// The number of extra bits needed by length code X - LENGTH_CODES_START. +var lengthExtraBits = [32]uint8{ + /* 257 */ 0, 0, 0, + /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, + /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + /* 280 */ 4, 5, 5, 5, 5, 0, +} + +// The length indicated by length code X - LENGTH_CODES_START. +var lengthBase = [32]uint8{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, + 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, + 64, 80, 96, 112, 128, 160, 192, 224, 255, +} + +// Minimum offset code that emits bits. +const offsetExtraBitsMinCode = 4 + +// offset code word extra bits. +var offsetExtraBits = [32]int8{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + /* extended window */ + 14, 14, +} + +var offsetCombined = [32]uint32{} + +func init() { + var offsetBase = [32]uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, + } + + for i := range offsetCombined[:] { + // Don't use extended window values... + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { + continue + } + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) + } +} + +// The odd order in which the codegen code sizes are written. +var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +type huffmanBitWriter struct { + // writer is the underlying writer. + // Do not use it directly; use the write method, which ensures + // that Write errors are sticky. + writer io.Writer + + // Data waiting to be written is bytes[0:nbytes] + // and then the low nbits of bits. + bits uint64 + nbits uint8 + nbytes uint8 + lastHuffMan bool + literalEncoding *huffmanEncoder + tmpLitEncoding *huffmanEncoder + offsetEncoding *huffmanEncoder + codegenEncoding *huffmanEncoder + err error + lastHeader int + // Set between 0 (reused block can be up to 2x the size) + logNewTablePenalty uint + bytes [256 + 8]byte + literalFreq [lengthCodesStart + 32]uint16 + offsetFreq [32]uint16 + codegenFreq [codegenCodeCount]uint16 + + // codegen must have an extra space for the final symbol. + codegen [literalCount + offsetCodeCount + 1]uint8 +} + +// Huffman reuse. +// +// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. +// +// This is controlled by several variables: +// +// If lastHeader is non-zero the Huffman table can be reused. +// This also indicates that a Huffman table has been generated that can output all +// possible symbols. +// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated +// an EOB with the previous table must be written. +// +// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. 
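+//
+// Example of the tradeoff (illustrative): with logNewTablePenalty == 8 the
+// estimated cost of a fresh table is inflated by newSize>>8, i.e. roughly
+// 0.4%, which biases the writer toward reusing the current tables.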
+// +// An incoming block estimates the output size of a new table using a 'fresh' by calculating the +// optimal size and adding a penalty in 'logNewTablePenalty'. +// A Huffman table is not optimal, which is why we add a penalty, and generating a new table +// is slower both for compression and decompression. + +func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { + return &huffmanBitWriter{ + writer: w, + literalEncoding: newHuffmanEncoder(literalCount), + tmpLitEncoding: newHuffmanEncoder(literalCount), + codegenEncoding: newHuffmanEncoder(codegenCodeCount), + offsetEncoding: newHuffmanEncoder(offsetCodeCount), + } +} + +func (w *huffmanBitWriter) reset(writer io.Writer) { + w.writer = writer + w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil + w.lastHeader = 0 + w.lastHuffMan = false +} + +func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { + a := t.offHist[:offsetCodeCount] + b := w.offsetEncoding.codes + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + + a = t.extraHist[:literalCount-256] + b = w.literalEncoding.codes[256:literalCount] + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + + a = t.litHist[:256] + b = w.literalEncoding.codes[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + return true +} + +func (w *huffmanBitWriter) flush() { + if w.err != nil { + w.nbits = 0 + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + n := w.nbytes + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + n++ + } + w.bits = 0 + w.write(w.bytes[:n]) + w.nbytes = 0 +} + +func (w *huffmanBitWriter) write(b []byte) { + if w.err != nil { + return + } + _, w.err = w.writer.Write(b) +} + +func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { + w.bits |= uint64(b) << (w.nbits & 63) + w.nbits += nb + if w.nbits >= 48 { + w.writeOutBits() + } +} + +func (w *huffmanBitWriter) writeBytes(bytes []byte) { + if w.err != nil { + return + } + n := w.nbytes + if w.nbits&7 != 0 { + w.err = InternalError("writeBytes with unfinished bits") + return + } + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + w.nbits -= 8 + n++ + } + if n != 0 { + w.write(w.bytes[:n]) + } + w.nbytes = 0 + w.write(bytes) +} + +// RFC 1951 3.2.7 specifies a special run-length encoding for specifying +// the literal and offset lengths arrays (which are concatenated into a single +// array). This method generates that run-length encoding. +// +// The result is written into the codegen array, and the frequencies +// of each code is written into the codegenFreq array. +// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional +// information. Code badCode is an end marker +// +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use +func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { + for i := range w.codegenFreq { + w.codegenFreq[i] = 0 + } + // Note that we are using codegen both as a temporary variable for holding + // a copy of the frequencies, and as the place where we put the result. + // This is fine because the output is always shorter than the input used + // so far. 
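+	// (RLE reminder, per RFC 1951: code 16 repeats the previous length 3-6
+	// times with a 2-bit count, 17 emits 3-10 zeros with a 3-bit count, and
+	// 18 emits 11-138 zeros with a 7-bit count; see the loop below.)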
+ codegen := w.codegen[:] // cache + // Copy the concatenated code sizes to codegen. Put a marker at the end. + cgnl := codegen[:numLiterals] + for i := range cgnl { + cgnl[i] = litEnc.codes[i].len() + } + + cgnl = codegen[numLiterals : numLiterals+numOffsets] + for i := range cgnl { + cgnl[i] = offEnc.codes[i].len() + } + codegen[numLiterals+numOffsets] = badCode + + size := codegen[0] + count := 1 + outIndex := 0 + for inIndex := 1; size != badCode; inIndex++ { + // INVARIANT: We have seen "count" copies of size that have not yet + // had output generated for them. + nextSize := codegen[inIndex] + if nextSize == size { + count++ + continue + } + // We need to generate codegen indicating "count" of size. + if size != 0 { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + count-- + for count >= 3 { + n := 6 + if n > count { + n = count + } + codegen[outIndex] = 16 + outIndex++ + codegen[outIndex] = uint8(n - 3) + outIndex++ + w.codegenFreq[16]++ + count -= n + } + } else { + for count >= 11 { + n := 138 + if n > count { + n = count + } + codegen[outIndex] = 18 + outIndex++ + codegen[outIndex] = uint8(n - 11) + outIndex++ + w.codegenFreq[18]++ + count -= n + } + if count >= 3 { + // count >= 3 && count <= 10 + codegen[outIndex] = 17 + outIndex++ + codegen[outIndex] = uint8(count - 3) + outIndex++ + w.codegenFreq[17]++ + count = 0 + } + } + count-- + for ; count >= 0; count-- { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + } + // Set up invariant for next time through the loop. + size = nextSize + count = 1 + } + // Marker indicating the end of the codegen. + codegen[outIndex] = badCode +} + +func (w *huffmanBitWriter) codegens() int { + numCodegens := len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + return numCodegens +} + +func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { + numCodegens = len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + return 3 + 5 + 5 + 4 + (3 * numCodegens) + + w.codegenEncoding.bitLength(w.codegenFreq[:]) + + int(w.codegenFreq[16])*2 + + int(w.codegenFreq[17])*3 + + int(w.codegenFreq[18])*7, numCodegens +} + +// dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) { + size = litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + return size +} + +// dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { + header, numCodegens := w.headerSize() + size = header + + litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + + extraBits + return size, numCodegens +} + +// extraBitSize will return the number of bits that will be written +// as "extra" bits on matches. +func (w *huffmanBitWriter) extraBitSize() int { + total := 0 + for i, n := range w.literalFreq[257:literalCount] { + total += int(n) * int(lengthExtraBits[i&31]) + } + for i, n := range w.offsetFreq[:offsetCodeCount] { + total += int(n) * int(offsetExtraBits[i&31]) + } + return total +} + +// fixedSize returns the size of dynamically encoded data in bits. 
+func (w *huffmanBitWriter) fixedSize(extraBits int) int { + return 3 + + fixedLiteralEncoding.bitLength(w.literalFreq[:]) + + fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + + extraBits +} + +// storedSize calculates the stored size, including header. +// The function returns the size in bits and whether the block +// fits inside a single block. +func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { + if in == nil { + return 0, false + } + if len(in) <= maxStoreBlockSize { + return (len(in) + 5) * 8, true + } + return 0, false +} + +func (w *huffmanBitWriter) writeCode(c hcode) { + // The function does not get inlined if we "& 63" the shift. + w.bits |= c.code64() << (w.nbits & 63) + w.nbits += c.len() + if w.nbits >= 48 { + w.writeOutBits() + } +} + +// writeOutBits will write bits to the buffer. +func (w *huffmanBitWriter) writeOutBits() { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + + // We over-write, but faster... + binary.LittleEndian.PutUint64(w.bytes[n:], bits) + n += 6 + + if n >= bufferFlushSize { + if w.err != nil { + n = 0 + return + } + w.write(w.bytes[:n]) + n = 0 + } + + w.nbytes = n +} + +// Write the header of a dynamic Huffman block to the output stream. +// +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen +func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { + if w.err != nil { + return + } + var firstBits int32 = 4 + if isEof { + firstBits = 5 + } + w.writeBits(firstBits, 3) + w.writeBits(int32(numLiterals-257), 5) + w.writeBits(int32(numOffsets-1), 5) + w.writeBits(int32(numCodegens-4), 4) + + for i := 0; i < numCodegens; i++ { + value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) + w.writeBits(int32(value), 3) + } + + i := 0 + for { + var codeWord = uint32(w.codegen[i]) + i++ + if codeWord == badCode { + break + } + w.writeCode(w.codegenEncoding.codes[codeWord]) + + switch codeWord { + case 16: + w.writeBits(int32(w.codegen[i]), 2) + i++ + case 17: + w.writeBits(int32(w.codegen[i]), 3) + i++ + case 18: + w.writeBits(int32(w.codegen[i]), 7) + i++ + } + } +} + +// writeStoredHeader will write a stored header. +// If the stored block is only used for EOF, +// it is replaced with a fixed huffman block. +func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { + if w.err != nil { + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. + if length == 0 && isEof { + w.writeFixedHeader(isEof) + // EOB: 7 bits, value: 0 + w.writeBits(0, 7) + w.flush() + return + } + + var flag int32 + if isEof { + flag = 1 + } + w.writeBits(flag, 3) + w.flush() + w.writeBits(int32(length), 16) + w.writeBits(int32(^uint16(length)), 16) +} + +func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { + if w.err != nil { + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + // Indicate that we are a fixed Huffman block + var value int32 = 2 + if isEof { + value = 3 + } + w.writeBits(value, 3) +} + +// writeBlock will write a block of tokens with the smallest encoding. +// The original input can be supplied, and if the huffman encoded data +// is larger than the original bytes, the data will be written as a +// stored block. 
+// If the input is nil, the tokens will always be Huffman encoded. +func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens.AddEOB() + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + numLiterals, numOffsets := w.indexTokens(tokens, false) + w.generate() + var extraBits int + storedSize, storable := w.storedSize(input) + if storable { + extraBits = w.extraBitSize() + } + + // Figure out smallest code. + // Fixed Huffman baseline. + var literalEncoding = fixedLiteralEncoding + var offsetEncoding = fixedOffsetEncoding + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } + + // Dynamic Huffman? + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + if dynamicSize < size { + size = dynamicSize + literalEncoding = w.literalEncoding + offsetEncoding = w.offsetEncoding + } + + // Stored bytes? + if storable && storedSize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + if literalEncoding == fixedLiteralEncoding { + w.writeFixedHeader(eof) + } else { + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + } + + // Write the tokens. + w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) +} + +// writeBlockDynamic encodes a block using a dynamic Huffman table. +// This should be used if the symbols used have a disproportionate +// histogram distribution. +// If input is supplied and the compression savings are below 1/16th of the +// input size the block is stored. +func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { + if w.err != nil { + return + } + + sync = sync || eof + if sync { + tokens.AddEOB() + } + + // We cannot reuse pure huffman table, and must mark as EOF. + if (w.lastHuffMan || eof) && w.lastHeader > 0 { + // We will not try to reuse. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } + + // fillReuse enables filling of empty values. + // This will make encodings always reusable without testing. + // However, this does not appear to benefit on most cases. + const fillReuse = false + + // Check if we can reuse... + if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + numLiterals, numOffsets := w.indexTokens(tokens, !sync) + extraBits := 0 + ssize, storable := w.storedSize(input) + + const usePrefs = true + if storable || w.lastHeader > 0 { + extraBits = w.extraBitSize() + } + + var size int + + // Check if we should reuse. + if w.lastHeader > 0 { + // Estimate size for using a new table. + // Use the previous header size as the best estimate. + newSize := w.lastHeader + tokens.EstimatedBits() + newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty + + // The estimated size is calculated as an optimal table. + // We add a penalty to make it more realistic and re-use a bit more. 
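+		// The shift above inflates the new-table estimate by
+		// newSize>>logNewTablePenalty; e.g. a penalty of 4 adds about 6%,
+		// biasing the decision toward reusing the previous table.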
+ reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits + + // Check if a new table is better. + if newSize < reuseSize { + // Write the EOB we owe. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + size = newSize + w.lastHeader = 0 + } else { + size = reuseSize + } + + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } + + // We want a new block/table + if w.lastHeader == 0 { + if fillReuse && !sync { + w.fillTokens() + numLiterals, numOffsets = maxNumLit, maxNumDist + } else { + w.literalFreq[endBlockMarker] = 1 + } + + w.generate() + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + + var numCodegens int + if fillReuse && !sync { + // Reindex for accurate size... + w.indexTokens(tokens, true) + } + size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + // Store predefined, if we don't get a reasonable improvement. + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. + if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + + if storable && ssize <= size { + // Store bytes, if we don't get an improvement. + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Write Huffman table. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + if !sync { + w.lastHeader, _ = w.headerSize() + } + w.lastHuffMan = false + } + + if sync { + w.lastHeader = 0 + } + // Write the tokens. + w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) +} + +func (w *huffmanBitWriter) fillTokens() { + for i, v := range w.literalFreq[:literalCount] { + if v == 0 { + w.literalFreq[i] = 1 + } + } + for i, v := range w.offsetFreq[:offsetCodeCount] { + if v == 0 { + w.offsetFreq[i] = 1 + } + } +} + +// indexTokens indexes a slice of tokens, and updates +// literalFreq and offsetFreq, and generates literalEncoding +// and offsetEncoding. +// The number of literal and offset tokens is returned. 
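+// If the block contains no matches at all, offsetFreq[0] is forced to 1
+// below so that numOffsets is at least 1 and the offset code tree remains
+// encodable.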
+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { + //copy(w.literalFreq[:], t.litHist[:]) + *(*[256]uint16)(w.literalFreq[:]) = t.litHist + //copy(w.literalFreq[256:], t.extraHist[:]) + *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist + w.offsetFreq = t.offHist + + if t.n == 0 { + return + } + if filled { + return maxNumLit, maxNumDist + } + // get the number of literals + numLiterals = len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + // get the number of offsets + numOffsets = len(w.offsetFreq) + for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { + numOffsets-- + } + if numOffsets == 0 { + // We haven't found a single match. If we want to go with the dynamic encoding, + // we should count at least one offset to be sure that the offset huffman tree could be encoded. + w.offsetFreq[0] = 1 + numOffsets = 1 + } + return +} + +func (w *huffmanBitWriter) generate() { + w.literalEncoding.generate(w.literalFreq[:literalCount], 15) + w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) +} + +// writeTokens writes a slice of tokens to the output. +// codes for literal and offset encoding must be supplied. +func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { + if w.err != nil { + return + } + if len(tokens) == 0 { + return + } + + // Only last token should be endBlockMarker. + var deferEOB bool + if tokens[len(tokens)-1] == endBlockMarker { + tokens = tokens[:len(tokens)-1] + deferEOB = true + } + + // Create slices up to the next power of two to avoid bounds checks. + lits := leCodes[:256] + offs := oeCodes[:32] + lengths := leCodes[lengthCodesStart:] + lengths = lengths[:32] + + // Go 1.16 LOVES having these on stack. + bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + + for _, t := range tokens { + if t < 256 { + //w.writeCode(lits[t.literal()]) + c := lits[t] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + continue + } + + // Write the length + length := t.length() + lengthCode := lengthCode(length) & 31 + if false { + w.writeCode(lengths[lengthCode]) + } else { + // inlined + c := lengths[lengthCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] + //w.writeBits(extraLength, extraLengthBits) + extraLength := int32(length - lengthBase[lengthCode]) + bits |= uint64(extraLength) << (nbits & 63) + nbits += extraLengthBits + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + // Write the offset + offset := t.offset() + offsetCode := (offset >> 16) & 31 + if false { 
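+			// Dead branch kept for reference: the hand-inlined copy in the
+			// else arm below is what actually runs.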
+ w.writeCode(offs[offsetCode]) + } else { + // inlined + c := offs[offsetCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] + //w.writeBits(extraOffset, extraOffsetBits) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if deferEOB { + w.writeCode(leCodes[endBlockMarker]) + } +} + +// huffOffset is a static offset encoder used for huffman only encoding. +// It can be reused since we will not be encoding offset values. +var huffOffset *huffmanEncoder + +func init() { + w := newHuffmanBitWriter(nil) + w.offsetFreq[0] = 1 + huffOffset = newHuffmanEncoder(offsetCodeCount) + huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15) +} + +// writeBlockHuff encodes a block of bytes as either +// Huffman encoded literals or uncompressed bytes if the +// results only gains very little from compression. +func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { + if w.err != nil { + return + } + + // Clear histogram + for i := range w.literalFreq[:] { + w.literalFreq[i] = 0 + } + if !w.lastHuffMan { + for i := range w.offsetFreq[:] { + w.offsetFreq[i] = 0 + } + } + + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + + // Add everything as literals + // We have to estimate the header size. + // Assume header is around 70 bytes: + // https://stackoverflow.com/a/25454430 + const guessHeaderSizeBits = 70 * 8 + histogram(input, w.literalFreq[:numLiterals]) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. + abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } + w.literalFreq[endBlockMarker] = 1 + w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) + estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) + if estBits < math.MaxInt32 { + estBits += w.lastHeader + if w.lastHeader == 0 { + estBits += guessHeaderSizeBits + } + estBits += estBits >> w.logNewTablePenalty + } + + // Store bytes, if we don't get a reasonable improvement. 
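+	// (ssize is the stored cost in bits from storedSize; estBits includes
+	// the estimated table header and the new-table penalty added above.)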
+ if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + if w.lastHeader > 0 { + reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256]) + + if estBits < reuseSize { + if debugDeflate { + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") + } + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } else if debugDeflate { + fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + } + } + + count := 0 + if w.lastHeader == 0 { + // Use the temp encoding, so swap. + w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + numCodegens := w.codegens() + + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + w.lastHuffMan = true + w.lastHeader, _ = w.headerSize() + if debugDeflate { + count += w.lastHeader + fmt.Println("header:", count/8) + } + } + + encoding := w.literalEncoding.codes[:256] + // Go 1.16 LOVES having these on stack. At least 1.5x the speed. + bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } + // Unroll, write 3 codes/loop. + // Fastest number of unrolls. + for len(input) > 3 { + // We must have at least 48 bits free. + if nbits >= 8 { + n := nbits >> 3 + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + bits >>= (n * 8) & 63 + nbits -= n * 8 + nbytes += n + } + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + a, b := encoding[input[0]], encoding[input[1]] + bits |= a.code64() << (nbits & 63) + bits |= b.code64() << ((nbits + a.len()) & 63) + c := encoding[input[2]] + nbits += b.len() + a.len() + bits |= c.code64() << (nbits & 63) + nbits += c.len() + input = input[3:] + } + + // Remaining... + for _, t := range input { + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= c.code64() << (nbits & 63) + + nbits += c.len() + if debugDeflate { + count += int(c.len()) + } + } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if debugDeflate { + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") + } + // Flush if needed to have space. 
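+	// (48 bits is the flush threshold: writeOutBits drains six whole bytes,
+	// leaving fewer than 16 buffered bits, so the next code always fits in
+	// the 64-bit accumulator.)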
+	if w.nbits >= 48 {
+		w.writeOutBits()
+	}
+
+	if eof || sync {
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+		w.lastHuffMan = false
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
new file mode 100644
index 00000000..be7b58b4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -0,0 +1,417 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"math"
+	"math/bits"
+)
+
+const (
+	maxBitsLimit = 16
+	// number of valid literals
+	literalCount = 286
+)
+
+// hcode is a huffman code with a bit code and bit length.
+type hcode uint32
+
+func (h hcode) len() uint8 {
+	return uint8(h)
+}
+
+func (h hcode) code64() uint64 {
+	return uint64(h >> 8)
+}
+
+func (h hcode) zero() bool {
+	return h == 0
+}
+
+type huffmanEncoder struct {
+	codes []hcode
+	bitCount [17]int32
+
+	// Allocate a reusable buffer with the longest possible frequency table.
+	// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+	// The largest of these is literalCount, so we allocate for that case.
+	freqcache [literalCount + 1]literalNode
+}
+
+type literalNode struct {
+	literal uint16
+	freq uint16
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+	// Our level. for better printing
+	level int32
+
+	// The frequency of the last node at this level
+	lastFreq int32
+
+	// The frequency of the next character to add to this level
+	nextCharFreq int32
+
+	// The frequency of the next pair (from level below) to add to this level.
+	// Only valid if the "needed" value of the next lower level is 0.
+	nextPairFreq int32
+
+	// The number of chains remaining to generate for this level before moving
+	// up to the next level
+	needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint8) {
+	*h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+	return hcode(length) | (hcode(code) << 8)
+}
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+	return bits.Reverse16(number << ((16 - bitLength) & 15))
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+	// Round capacity up to the next power of two.
+	c := uint(bits.Len32(uint32(size - 1)))
+	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
+}
+
+// Generates a HuffmanCode corresponding to the fixed literal table.
+func generateFixedLiteralEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(literalCount)
+	codes := h.codes
+	var ch uint16
+	for ch = 0; ch < literalCount; ch++ {
+		var bits uint16
+		var size uint8
+		switch {
+		case ch < 144:
+			// size 8, 00110000 .. 10111111
+			bits = ch + 48
+			size = 8
+		case ch < 256:
+			// size 9, 110010000 .. 111111111
+			bits = ch + 400 - 144
+			size = 9
+		case ch < 280:
+			// size 7, 0000000 .. 0010111
+			bits = ch - 256
+			size = 7
+		default:
+			// size 8, 11000000 .. 11000111
+			bits = ch + 192 - 280
+			size = 8
+		}
+		codes[ch] = newhcode(reverseBits(bits, size), size)
+	}
+	return h
+}
+
+func generateFixedOffsetEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(30)
+	codes := h.codes
+	for ch := range codes {
+		codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
+	}
+	return h
+}
+
+var fixedLiteralEncoding = generateFixedLiteralEncoding()
+var fixedOffsetEncoding = generateFixedOffsetEncoding()
+
+func (h *huffmanEncoder) bitLength(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			total += int(f) * int(h.codes[i].len())
+		}
+	}
+	return total
+}
+
+func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
+	var total int
+	for _, f := range b {
+		total += int(h.codes[f].len())
+	}
+	return total
+}
+
+// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
+func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			code := h.codes[i]
+			if code.zero() {
+				return math.MaxInt32
+			}
+			total += int(f) * int(code.len())
+		}
+	}
+	return total
+}
+
+// Return the number of literals assigned to each bit size in the Huffman encoding
+//
+// This method is only called when list.length >= 3
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list An array of the literals with non-zero frequencies
+//
+// and their associated frequencies. The array is in order of increasing
+// frequency, and has as its last element a special element with frequency
+// MaxInt32
+//
+// maxBits The maximum number of bits that should be used to encode any literal.
+//
+// Must be less than 16.
+//
+// return An integer array in which array[i] indicates the number of literals
+//
+// that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+	if maxBits >= maxBitsLimit {
+		panic("flate: maxBits too large")
+	}
+	n := int32(len(list))
+	list = list[0 : n+1]
+	list[n] = maxNode()
+
+	// The tree can't have greater depth than n - 1, no matter what.
This + // saves a little bit of work in some small cases + if maxBits > n-1 { + maxBits = n - 1 + } + + // Create information about each of the levels. + // A bogus "Level 0" whose sole purpose is so that + // level1.prev.needed==0. This makes level1.nextPairFreq + // be a legitimate value that never gets chosen. + var levels [maxBitsLimit]levelInfo + // leafCounts[i] counts the number of literals at the left + // of ancestors of the rightmost node at level i. + // leafCounts[i][j] is the number of literals at the left + // of the level j ancestor. + var leafCounts [maxBitsLimit][maxBitsLimit]int32 + + // Descending to only have 1 bounds check. + l2f := int32(list[2].freq) + l1f := int32(list[1].freq) + l0f := int32(list[0].freq) + int32(list[1].freq) + + for level := int32(1); level <= maxBits; level++ { + // For every level, the first two items are the first two characters. + // We initialize the levels as if we had already figured this out. + levels[level] = levelInfo{ + level: level, + lastFreq: l1f, + nextCharFreq: l2f, + nextPairFreq: l0f, + } + leafCounts[level][level] = 2 + if level == 1 { + levels[level].nextPairFreq = math.MaxInt32 + } + } + + // We need a total of 2*n - 2 items at top level and have already generated 2. + levels[maxBits].needed = 2*n - 4 + + level := uint32(maxBits) + for level < 16 { + l := &levels[level] + if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { + // We've run out of both leafs and pairs. + // End all calculations for this level. + // To make sure we never come back to this level or any lower level, + // set nextPairFreq impossibly large. + l.needed = 0 + levels[level+1].nextPairFreq = math.MaxInt32 + level++ + continue + } + + prevFreq := l.lastFreq + if l.nextCharFreq < l.nextPairFreq { + // The next item on this row is a leaf node. + n := leafCounts[level][level] + 1 + l.lastFreq = l.nextCharFreq + // Lower leafCounts are the same of the previous node. + leafCounts[level][level] = n + e := list[n] + if e.literal < math.MaxUint16 { + l.nextCharFreq = int32(e.freq) + } else { + l.nextCharFreq = math.MaxInt32 + } + } else { + // The next item on this row is a pair from the previous row. + // nextPairFreq isn't valid until we generate two + // more values in the level below + l.lastFreq = l.nextPairFreq + // Take leaf counts from the lower level, except counts[level] remains the same. + if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } + levels[l.level-1].needed = 2 + } + + if l.needed--; l.needed == 0 { + // We've done everything we need to do for this level. + // Continue calculating one level up. Fill in nextPairFreq + // of that level with the sum of the two nodes we've just calculated on + // this level. + if l.level == maxBits { + // All done! + break + } + levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq + level++ + } else { + // If we stole from below, move down temporarily to replenish it. + for levels[level-1].needed > 0 { + level-- + } + } + } + + // Somethings is wrong if at the end, the top level is null or hasn't used + // all of the leaves. + if leafCounts[maxBits][maxBits] != n { + panic("leafCounts[maxBits][maxBits] != n") + } + + bitCount := h.bitCount[:maxBits+1] + bits := 1 + counts := &leafCounts[maxBits] + for level := maxBits; level > 0; level-- { + // chain.leafCount gives the number of literals requiring at least "bits" + // bits to encode. 
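+		// (Each difference counts[level] - counts[level-1] is the number of
+		// leaves first chained at this depth, i.e. the number of literals
+		// coded with exactly "bits" bits.)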
+ bitCount[bits] = counts[level] - counts[level-1] + bits++ + } + return bitCount +} + +// Look at the leaves and assign them a bit count and an encoding as specified +// in RFC 1951 3.2.2 +func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { + code := uint16(0) + for n, bits := range bitCount { + code <<= 1 + if n == 0 || bits == 0 { + continue + } + // The literals list[len(list)-bits] .. list[len(list)-bits] + // are encoded using "bits" bits, and get the values + // code, code + 1, .... The code values are + // assigned in literal order (not frequency order). + chunk := list[len(list)-int(bits):] + + sortByLiteral(chunk) + for _, node := range chunk { + h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) + code++ + } + list = list[0 : len(list)-int(bits)] + } +} + +// Update this Huffman Code object to be the minimum code for the specified frequency count. +// +// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. +// maxBits The maximum number of bits to use for any literal. +func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { + list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] + // Number of non-zero literals + count := 0 + // Set list to be the set of all non-zero literals and their frequencies + for i, f := range freq { + if f != 0 { + list[count] = literalNode{uint16(i), f} + count++ + } else { + codes[i] = 0 + } + } + list[count] = literalNode{} + + list = list[:count] + if count <= 2 { + // Handle the small cases here, because they are awkward for the general case code. With + // two or fewer literals, everything has bit length 1. + for i, node := range list { + // "list" is in order of increasing literal value. + h.codes[node.literal].set(uint16(i), 1) + } + return + } + sortByFreq(list) + + // Get the number of literals for each bit count + bitCount := h.bitCounts(list, maxBits) + // And do the assignment + h.assignEncodingAndSize(bitCount, list) +} + +// atLeastOne clamps the result between 1 and 15. +func atLeastOne(v float32) float32 { + if v < 1 { + return 1 + } + if v > 15 { + return 15 + } + return v +} + +func histogram(b []byte, h []uint16) { + if true && len(b) >= 8<<10 { + // Split for bigger inputs + histogramSplit(b, h) + } else { + h = h[:256] + for _, t := range b { + h[t]++ + } + } +} + +func histogramSplit(b []byte, h []uint16) { + // Tested, and slightly faster than 2-way. + // Writing to separate arrays and combining is also slightly slower. + h = h[:256] + for len(b)&3 != 0 { + h[b[0]]++ + b = b[1:] + } + n := len(b) / 4 + x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:] + y, z, w = y[:len(x)], z[:len(x)], w[:len(x)] + for i, t := range x { + v0 := &h[t] + v1 := &h[y[i]] + v3 := &h[w[i]] + v2 := &h[z[i]] + *v0++ + *v1++ + *v2++ + *v3++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go new file mode 100644 index 00000000..6c05ba8c --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go @@ -0,0 +1,159 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. 
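+// sortByFreq orders literalNodes by frequency, breaking ties by literal
+// value; it follows the standard library sort: quicksort with a heapsort
+// fallback and insertion sort for small runs.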
+func sortByFreq(data []literalNode) { + n := len(data) + quickSortByFreq(data, 0, n, maxDepth(n)) +} + +func quickSortByFreq(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivotByFreq(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSortByFreq(data, a, mlo, maxDepth) + a = mhi // i.e., quickSortByFreq(data, mhi, b) + } else { + quickSortByFreq(data, mhi, b, maxDepth) + b = mlo // i.e., quickSortByFreq(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSortByFreq(data, a, b) + } +} + +func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. + s := (hi - lo) / 8 + medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) + medianOfThreeSortByFreq(data, m, m-s, m+s) + medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThreeSortByFreq(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { + } + b := a + for { + for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot + } + for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. 
+ protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot + } + for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSortByFreq(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// quickSortByFreq, loosely following Bentley and McIlroy, +// ``Engineering a Sort Function,'' SP&E November 1993. + +// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go new file mode 100644 index 00000000..93f1aea1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go @@ -0,0 +1,201 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. 
+func sortByLiteral(data []literalNode) { + n := len(data) + quickSort(data, 0, n, maxDepth(n)) +} + +func quickSort(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivot(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSort(data, a, mlo, maxDepth) + a = mhi // i.e., quickSort(data, mhi, b) + } else { + quickSort(data, mhi, b, maxDepth) + b = mlo // i.e., quickSort(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].literal < data[i-6].literal { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSort(data, a, b) + } +} +func heapSort(data []literalNode, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDown(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDown(data, lo, i, first) + } +} + +// siftDown implements the heap property on data[lo, hi). +// first is an offset into the array where the root of the heap lies. +func siftDown(data []literalNode, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && data[first+child].literal < data[first+child+1].literal { + child++ + } + if data[first+root].literal > data[first+child].literal { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} +func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. + s := (hi - lo) / 8 + medianOfThree(data, lo, lo+s, lo+2*s) + medianOfThree(data, m, m-s, m+s) + medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThree(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && data[a].literal < data[pivot].literal; a++ { + } + b := a + for { + for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot + } + for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. 
+ protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].literal > data[pivot].literal { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot + } + for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSort(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && data[j].literal < data[j-1].literal; j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// maxDepth returns a threshold at which quicksort should switch +// to heapsort. It returns 2*ceil(lg(n+1)). +func maxDepth(n int) int { + var depth int + for i := n; i > 0; i >>= 1 { + depth++ + } + return depth * 2 +} + +// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThree(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].literal < data[m1].literal { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go new file mode 100644 index 00000000..2f410d64 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -0,0 +1,829 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flate implements the DEFLATE compressed data format, described in +// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file +// formats. +package flate + +import ( + "bufio" + "compress/flate" + "fmt" + "io" + "math/bits" + "sync" +) + +const ( + maxCodeLen = 16 // max length of Huffman code + maxCodeLenMask = 15 // mask for max length of Huffman code + // The next three numbers come from the RFC section 3.2.7, with the + // additional proviso in section 3.2.5 which implies that distance codes + // 30 and 31 should never occur in compressed data. + maxNumLit = 286 + maxNumDist = 30 + numCodes = 19 // number of codes in Huffman meta-code + + debugDecode = false +) + +// Value of length - 3 and extra bits. 
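+// For example, decCodeToLen[8] = {length: 0x8, extra: 0x1}: length code 265
+// has base match length 8+3 = 11 and one extra bit, covering lengths 11-12.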
+type lengthExtra struct {
+	length, extra uint8
+}
+
+var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
+
+var bitMask32 = [32]uint32{
+	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
+// Initialize the fixedHuffmanDecoder only once upon first use.
+var fixedOnce sync.Once
+var fixedHuffmanDecoder huffmanDecoder
+
+// A CorruptInputError reports the presence of corrupt input at a given offset.
+type CorruptInputError = flate.CorruptInputError
+
+// An InternalError reports an error in the flate code itself.
+type InternalError string
+
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
+
+// A ReadError reports an error encountered while reading input.
+//
+// Deprecated: No longer returned.
+type ReadError = flate.ReadError
+
+// A WriteError reports an error encountered while writing output.
+//
+// Deprecated: No longer returned.
+type WriteError = flate.WriteError
+
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface {
+	// Reset discards any buffered data and resets the Resetter as if it was
+	// newly initialized with the given reader.
+	Reset(r io.Reader, dict []byte) error
+}
+
+// The data structure for decoding Huffman tables is based on that of
+// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
+// For codes smaller than the table width, there are multiple entries
+// (each combination of trailing bits has the same value). For codes
+// larger than the table width, the table contains a link to an overflow
+// table. The width of each entry in the link table is the maximum code
+// size minus the chunk width.
+//
+// Note that you can do a lookup in the table even without all bits
+// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
+// have the property that shorter codes come before longer ones, the
+// bit length estimate in the result is a lower bound on the actual
+// number of bits.
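+//
+// For example, with huffmanChunkBits == 9 a 7-bit code occupies four table
+// slots, one for each value of its two unused trailing bits, with every
+// slot holding the same symbol value and a count of 7.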
+//
+// See the following:
+//	http://www.gzip.org/algorithm.txt
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
+const (
+	huffmanChunkBits = 9
+	huffmanNumChunks = 1 << huffmanChunkBits
+	huffmanCountMask = 15
+	huffmanValueShift = 4
+)
+
+type huffmanDecoder struct {
+	maxRead int // the maximum number of bits we can read and not overread
+	chunks *[huffmanNumChunks]uint16 // chunks as described above
+	links [][]uint16 // overflow links
+	linkMask uint32 // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(lengths []int) bool {
+	// Sanity enables additional runtime tests during Huffman
+	// table construction. It's intended to be used during
+	// development to supplement the currently ad-hoc unit tests.
+	const sanity = false
+
+	if h.chunks == nil {
+		h.chunks = new([huffmanNumChunks]uint16)
+	}
+
+	if h.maxRead != 0 {
+		*h = huffmanDecoder{chunks: h.chunks, links: h.links}
+	}
+
+	// Count number of codes of each length,
+	// compute maxRead and max length.
+	var count [maxCodeLen]int
+	var min, max int
+	for _, n := range lengths {
+		if n == 0 {
+			continue
+		}
+		if min == 0 || n < min {
+			min = n
+		}
+		if n > max {
+			max = n
+		}
+		count[n&maxCodeLenMask]++
+	}
+
+	// Empty tree. The decompressor.huffSym function will fail later if the tree
+	// is used. Technically, an empty tree is only valid for the HDIST tree and
+	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+	// is guaranteed to fail since it will attempt to use the tree to decode the
+	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+	// guaranteed to fail later since the compressed data section must be
+	// composed of at least one symbol (the end-of-block marker).
+	if max == 0 {
+		return true
+	}
+
+	code := 0
+	var nextcode [maxCodeLen]int
+	for i := min; i <= max; i++ {
+		code <<= 1
+		nextcode[i&maxCodeLenMask] = code
+		code += count[i&maxCodeLenMask]
+	}
+
+	// Check that the coding is complete (i.e., that we've
+	// assigned all 2-to-the-max possible bit sequences).
+	// Exception: To be compatible with zlib, we also need to
+	// accept degenerate single-code codings. See also
+	// TestDegenerateHuffmanCoding.
+	if code != 1<<uint(max) && !(code == 1 && max == 1) {
+		if debugDecode {
+			fmt.Println("coding failed, code, max:", code, max)
+		}
+		return false
+	}
+
+	h.maxRead = min
+
+	chunks := h.chunks[:]
+	for i := range chunks {
+		chunks[i] = 0
+	}
+
+	if max > huffmanChunkBits {
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		if cap(h.links) < huffmanNumChunks-link {
+			h.links = make([][]uint16, huffmanNumChunks-link)
+		} else {
+			h.links = h.links[:huffmanNumChunks-link]
+		}
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			reverse := int(bits.Reverse16(uint16(j)))
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			h.links[off] = make([]uint16, numLinks)
+		}
+	} else {
+		h.links = h.links[:0]
+	}
+
+	for i, n := range lengths {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint16(i<<huffmanValueShift | n)
+		reverse := int(bits.Reverse16(uint16(code)))
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk. Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+ if sanity && h.chunks[off] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[off] = chunk + } + } else { + j := reverse & (huffmanNumChunks - 1) + if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { + // Longer codes should have been + // associated with a link table above. + panic("impossible: not an indirect chunk") + } + value := h.chunks[j] >> huffmanValueShift + linktab := h.links[value] + reverse >>= huffmanChunkBits + for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { + if sanity && linktab[off] != 0 { + panic("impossible: overwriting existing chunk") + } + linktab[off] = chunk + } + } + } + + if sanity { + // Above we've sanity checked that we never overwrote + // an existing entry. Here we additionally check that + // we filled the tables completely. + for i, chunk := range h.chunks { + if chunk == 0 { + // As an exception, in the degenerate + // single-code case, we allow odd + // chunks to be missing. + if code == 1 && i%2 == 1 { + continue + } + panic("impossible: missing chunk") + } + } + for _, linktab := range h.links { + for _, chunk := range linktab { + if chunk == 0 { + panic("impossible: missing chunk") + } + } + } + } + + return true +} + +// Reader is the actual read interface needed by NewReader. +// If the passed in io.Reader does not also have ReadByte, +// the NewReader will introduce its own buffering. +type Reader interface { + io.Reader + io.ByteReader +} + +type step uint8 + +const ( + copyData step = iota + 1 + nextBlock + huffmanBytesBuffer + huffmanBytesReader + huffmanBufioReader + huffmanStringsReader + huffmanGenericReader +) + +// Decompress state. +type decompressor struct { + // Input source. + r Reader + roffset int64 + + // Huffman decoders for literal/length, distance. + h1, h2 huffmanDecoder + + // Length arrays used to define Huffman codes. + bits *[maxNumLit + maxNumDist]int + codebits *[numCodes]int + + // Output history, buffer. + dict dictDecoder + + // Next step in the decompression, + // and decompression state. + step step + stepState int + err error + toRead []byte + hl, hd *huffmanDecoder + copyLen int + copyDist int + + // Temporary buffer (avoids repeated allocation). + buf [4]byte + + // Input bits, in top of b. + b uint32 + + nb uint + final bool +} + +func (f *decompressor) nextBlock() { + for f.nb < 1+2 { + if f.err = f.moreBits(); f.err != nil { + return + } + } + f.final = f.b&1 == 1 + f.b >>= 1 + typ := f.b & 3 + f.b >>= 2 + f.nb -= 1 + 2 + switch typ { + case 0: + f.dataBlock() + if debugDecode { + fmt.Println("stored block") + } + case 1: + // compressed, fixed Huffman tables + f.hl = &fixedHuffmanDecoder + f.hd = nil + f.huffmanBlockDecoder() + if debugDecode { + fmt.Println("predefinied huffman block") + } + case 2: + // compressed, dynamic Huffman tables + if f.err = f.readHuffman(); f.err != nil { + break + } + f.hl = &f.h1 + f.hd = &f.h2 + f.huffmanBlockDecoder() + if debugDecode { + fmt.Println("dynamic huffman block") + } + default: + // 3 is reserved. 
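+		// (Block type 0b11 never occurs in a valid DEFLATE stream;
+		// see RFC 1951 section 3.2.3.)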
+ if debugDecode { + fmt.Println("reserved data block encountered") + } + f.err = CorruptInputError(f.roffset) + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + for { + if len(f.toRead) > 0 { + n := copy(b, f.toRead) + f.toRead = f.toRead[n:] + if len(f.toRead) == 0 { + return n, f.err + } + return n, nil + } + if f.err != nil { + return 0, f.err + } + + f.doStep() + + if f.err != nil && len(f.toRead) == 0 { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + } + } +} + +// WriteTo implements the io.WriteTo interface for io.Copy and friends. +func (f *decompressor) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + flushed := false + for { + if len(f.toRead) > 0 { + n, err := w.Write(f.toRead) + total += int64(n) + if err != nil { + f.err = err + return total, err + } + if n != len(f.toRead) { + return total, io.ErrShortWrite + } + f.toRead = f.toRead[:0] + } + if f.err != nil && flushed { + if f.err == io.EOF { + return total, nil + } + return total, f.err + } + if f.err == nil { + f.doStep() + } + if len(f.toRead) == 0 && f.err != nil && !flushed { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + flushed = true + } + } +} + +func (f *decompressor) Close() error { + if f.err == io.EOF { + return nil + } + return f.err +} + +// RFC 1951 section 3.2.7. +// Compression with dynamic Huffman codes + +var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +func (f *decompressor) readHuffman() error { + // HLIT[5], HDIST[5], HCLEN[4]. + for f.nb < 5+5+4 { + if err := f.moreBits(); err != nil { + return err + } + } + nlit := int(f.b&0x1F) + 257 + if nlit > maxNumLit { + if debugDecode { + fmt.Println("nlit > maxNumLit", nlit) + } + return CorruptInputError(f.roffset) + } + f.b >>= 5 + ndist := int(f.b&0x1F) + 1 + if ndist > maxNumDist { + if debugDecode { + fmt.Println("ndist > maxNumDist", ndist) + } + return CorruptInputError(f.roffset) + } + f.b >>= 5 + nclen := int(f.b&0xF) + 4 + // numCodes is 19, so nclen is always valid. + f.b >>= 4 + f.nb -= 5 + 5 + 4 + + // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. + for i := 0; i < nclen; i++ { + for f.nb < 3 { + if err := f.moreBits(); err != nil { + return err + } + } + f.codebits[codeOrder[i]] = int(f.b & 0x7) + f.b >>= 3 + f.nb -= 3 + } + for i := nclen; i < len(codeOrder); i++ { + f.codebits[codeOrder[i]] = 0 + } + if !f.h1.init(f.codebits[0:]) { + if debugDecode { + fmt.Println("init codebits failed") + } + return CorruptInputError(f.roffset) + } + + // HLIT + 257 code lengths, HDIST + 1 code lengths, + // using the code length Huffman code. + for i, n := 0, nlit+ndist; i < n; { + x, err := f.huffSym(&f.h1) + if err != nil { + return err + } + if x < 16 { + // Actual length. + f.bits[i] = x + i++ + continue + } + // Repeat previous length or zero. 
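+		// Code 16 repeats the previous length 3-6 times (2 extra bits),
+		// 17 writes 3-10 zeros (3 extra bits) and 18 writes 11-138 zeros
+		// (7 extra bits); see RFC 1951 section 3.2.7.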
+		var rep int
+		var nb uint
+		var b int
+		switch x {
+		default:
+			return InternalError("unexpected length code")
+		case 16:
+			rep = 3
+			nb = 2
+			if i == 0 {
+				if debugDecode {
+					fmt.Println("i==0")
+				}
+				return CorruptInputError(f.roffset)
+			}
+			b = f.bits[i-1]
+		case 17:
+			rep = 3
+			nb = 3
+			b = 0
+		case 18:
+			rep = 11
+			nb = 7
+			b = 0
+		}
+		for f.nb < nb {
+			if err := f.moreBits(); err != nil {
+				if debugDecode {
+					fmt.Println("morebits:", err)
+				}
+				return err
+			}
+		}
+		rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
+		f.b >>= nb & regSizeMaskUint32
+		f.nb -= nb
+		if i+rep > n {
+			if debugDecode {
+				fmt.Println("i+rep > n", i, rep, n)
+			}
+			return CorruptInputError(f.roffset)
+		}
+		for j := 0; j < rep; j++ {
+			f.bits[i] = b
+			i++
+		}
+	}
+
+	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+		if debugDecode {
+			fmt.Println("init2 failed")
+		}
+		return CorruptInputError(f.roffset)
+	}
+
+	// As an optimization, we can initialize the maxRead bits to read at a time
+	// for the HLIT tree to the length of the EOB marker since we know that
+	// every block must terminate with one. This preserves the property that
+	// we never read any extra bytes after the end of the DEFLATE stream.
+	if f.h1.maxRead < f.bits[endBlockMarker] {
+		f.h1.maxRead = f.bits[endBlockMarker]
+	}
+	if !f.final {
+		// If not the final block, the smallest block possible is
+		// a predefined table, BTYPE=01, with a single EOB marker.
+		// This will take up 3 + 7 bits.
+		f.h1.maxRead += 10
+	}
+
+	return nil
+}
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+	// Uncompressed.
+	// Discard current half-byte.
+	left := (f.nb) & 7
+	f.nb -= left
+	f.b >>= left
+
+	offBytes := f.nb >> 3
+	// Unfilled values will be overwritten.
+	f.buf[0] = uint8(f.b)
+	f.buf[1] = uint8(f.b >> 8)
+	f.buf[2] = uint8(f.b >> 16)
+	f.buf[3] = uint8(f.b >> 24)
+
+	f.roffset += int64(offBytes)
+	f.nb, f.b = 0, 0
+
+	// Length then ones-complement of length.
+	nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
+	f.roffset += int64(nr)
+	if err != nil {
+		f.err = noEOF(err)
+		return
+	}
+	n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
+	nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
+	if nn != ^n {
+		if debugDecode {
+			ncomp := ^n
+			fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
+		}
+		f.err = CorruptInputError(f.roffset)
+		return
+	}
+
+	if n == 0 {
+		f.toRead = f.dict.readFlush()
+		f.finishBlock()
+		return
+	}
+
+	f.copyLen = int(n)
+	f.copyData()
+}
+
+// copyData copies f.copyLen bytes from the underlying reader into f.hist.
+// It pauses for reads when f.hist is full.
+func (f *decompressor) copyData() { + buf := f.dict.writeSlice() + if len(buf) > f.copyLen { + buf = buf[:f.copyLen] + } + + cnt, err := io.ReadFull(f.r, buf) + f.roffset += int64(cnt) + f.copyLen -= cnt + f.dict.writeMark(cnt) + if err != nil { + f.err = noEOF(err) + return + } + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = copyData + return + } + f.finishBlock() +} + +func (f *decompressor) finishBlock() { + if f.final { + if f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() + } + f.err = io.EOF + } + f.step = nextBlock +} + +func (f *decompressor) doStep() { + switch f.step { + case copyData: + f.copyData() + case nextBlock: + f.nextBlock() + case huffmanBytesBuffer: + f.huffmanBytesBuffer() + case huffmanBytesReader: + f.huffmanBytesReader() + case huffmanBufioReader: + f.huffmanBufioReader() + case huffmanStringsReader: + f.huffmanStringsReader() + case huffmanGenericReader: + f.huffmanGenericReader() + default: + panic("BUG: unexpected step state") + } +} + +// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. +func noEOF(e error) error { + if e == io.EOF { + return io.ErrUnexpectedEOF + } + return e +} + +func (f *decompressor) moreBits() error { + c, err := f.r.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << (f.nb & regSizeMaskUint32) + f.nb += 8 + return nil +} + +// Read the next Huffman-encoded symbol from f according to h. +func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(h.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := f.r.ReadByte() + if err != nil { + f.b = b + f.nb = nb + return 0, noEOF(err) + } + f.roffset++ + b |= uint32(c) << (nb & regSizeMaskUint32) + nb += 8 + } + chunk := h.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return 0, f.err + } + f.b = b >> (n & regSizeMaskUint32) + f.nb = nb - n + return int(chunk >> huffmanValueShift), nil + } + } +} + +func makeReader(r io.Reader) Reader { + if rr, ok := r.(Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +func fixedHuffmanDecoderInit() { + fixedOnce.Do(func() { + // These come from the RFC section 3.2.6. 
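+		// Fixed literal/length code lengths: 8 bits for symbols 0-143,
+		// 9 for 144-255, 7 for 256-279 and 8 for 280-287.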
+ var bits [288]int + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + fixedHuffmanDecoder.init(bits[:]) + }) +} + +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + h1: f.h1, + h2: f.h2, + dict: f.dict, + step: nextBlock, + } + f.dict.init(maxMatchOffset, dict) + return nil +} + +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = nextBlock + f.dict.init(maxMatchOffset, nil) + return &f +} + +// NewReaderDict is like NewReader but initializes the reader +// with a preset dictionary. The returned Reader behaves as if +// the uncompressed data stream started with the given dictionary, +// which has already been read. NewReaderDict is typically used +// to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = nextBlock + f.dict.init(maxMatchOffset, dict) + return &f +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go new file mode 100644 index 00000000..2b2f993f --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -0,0 +1,1283 @@ +// Code generated by go generate gen_inflate.go. DO NOT EDIT. + +package flate + +import ( + "bufio" + "bytes" + "fmt" + "math/bits" + "strings" +) + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBytesBuffer() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bytes.Buffer) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
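+			// Each lookup consumes up to huffmanChunkBits bits from the primary
+			// chunks table; longer codes continue through f.hl.links.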
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanBytesBuffer + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
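+			// Worked example: distance code 4 has nb = 1, so dist becomes
+			// 1<<2 + 1 + extra, i.e. 5 or 6 (the ranges in RFC 1951 section 3.2.5).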
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBytesBuffer // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBytesReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bytes.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanBytesReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBytesReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBufioReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bufio.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanBufioReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBufioReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanStringsReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*strings.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanStringsReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanGenericReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanGenericReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanGenericReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+func (f *decompressor) huffmanBlockDecoder() {
+	switch f.r.(type) {
+	case *bytes.Buffer:
+		f.huffmanBytesBuffer()
+	case *bytes.Reader:
+		f.huffmanBytesReader()
+	case *bufio.Reader:
+		f.huffmanBufioReader()
+	case *strings.Reader:
+		f.huffmanStringsReader()
+	case Reader:
+		f.huffmanGenericReader()
+	default:
+		f.huffmanGenericReader()
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
new file mode 100644
index 00000000..703b9a89
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -0,0 +1,241 @@
+package flate
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math/bits"
+)
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL1 struct {
+	fastGen
+	table [tableSize]tableEntry
+}
+
+// EncodeL1 uses a similar algorithm to level 1
+func (e *fastEncL1) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashBytes              = 5
+	)
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
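+	// With inputMargin == 11, any position at or below sLimit leaves at least
+	// 8 bytes of lookahead, which keeps the 64-bit loads in this loop in bounds.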
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+
+	for {
+		const skipLog = 5
+		const doEvery = 2
+
+		nextS := s
+		var candidate tableEntry
+		for {
+			nextHash := hashLen(cv, tableBits, hashBytes)
+			candidate = e.table[nextHash]
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+
+			now := load6432(src, nextS)
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+			nextHash = hashLen(now, tableBits, hashBytes)
+
+			offset := s - (candidate.offset - e.cur)
+			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+				break
+			}
+
+			// Do one right away...
+			cv = now
+			s = nextS
+			nextS++
+			candidate = e.table[nextHash]
+			now >>= 8
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+			offset = s - (candidate.offset - e.cur)
+			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+				break
+			}
+			cv = now
+			s = nextS
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+
+			// Extend the 4-byte match as long as possible.
+			t := candidate.offset - e.cur
+			var l = int32(4)
+			if false {
+				l = e.matchlenLong(s+4, t+4, src) + 4
+			} else {
+				// inlined:
+				a := src[s+4:]
+				b := src[t+4:]
+				for len(a) >= 8 {
+					if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+						l += int32(bits.TrailingZeros64(diff) >> 3)
+						break
+					}
+					l += 8
+					a = a[8:]
+					b = b[8:]
+				}
+				if len(a) < 8 {
+					b = b[:len(a)]
+					for i := range a {
+						if a[i] != b[i] {
+							break
+						}
+						l++
+					}
+				}
+			}
+
+			// Extend backwards
+			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+				s--
+				t--
+				l++
+			}
+			if nextEmit < s {
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
+			}
+
+			// Save the match found
+			if false {
+				dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+			} else {
+				// Inlined...
+				xoffset := uint32(s - t - baseMatchOffset)
+				xlength := l
+				oc := offsetCode(xoffset)
+				xoffset |= oc << 16
+				for xlength > 0 {
+					xl := xlength
+					if xl > 258 {
+						if xl > 258+baseMatchLength {
+							xl = 258
+						} else {
+							xl = 258 - baseMatchLength
+						}
+					}
+					xlength -= xl
+					xl -= baseMatchLength
+					dst.extraHist[lengthCodes1[uint8(xl)]]++
+					dst.offHist[oc]++
+					dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+					dst.n++
+				}
+			}
+			s += l
+			nextEmit = s
+			if nextS >= s {
+				s = nextS + 1
+			}
+			if s >= sLimit {
+				// Index first pair after match end.
+				if int(s+l+8) < len(src) {
+					cv := load6432(src, s)
+					e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+				}
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-2 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
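+			// x holds src[s-2 : s+6]; shifting it right by 16 bits re-aligns
+			// the same load to position s for the second hash.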
+ x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hashLen(x, tableBits, hashBytes) + e.table[prevHash] = tableEntry{offset: o} + x >>= 16 + currHash := hashLen(x, tableBits, hashBytes) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { + cv = x >> 8 + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go new file mode 100644 index 00000000..876dfbe3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -0,0 +1,214 @@ +package flate + +import "fmt" + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastEncL2 struct { + fastGen + table [bTableSize]tableEntry +} + +// EncodeL2 uses a similar algorithm to level 1, but is capable +// of matching across blocks giving better compression at a small slowdown. +func (e *fastEncL2) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashBytes = 5 + ) + + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + // When should we start skipping if we haven't found matches in a long while. + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, bTableBits, hashBytes) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash] + now := load6432(src, nextS) + e.table[nextHash] = tableEntry{offset: s + e.cur} + nextHash = hashLen(now, bTableBits, hashBytes) + + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} + break + } + + // Do one right away... 
+ cv = now + s = nextS + nextS++ + candidate = e.table[nextHash] + now >>= 8 + e.table[nextHash] = tableEntry{offset: s + e.cur} + + offset = s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + break + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+l+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // Store every second hash in-between, but offset by 1. + for i := s - l + 2; i < s-5; i += 7 { + x := load6432(src, i) + nextHash := hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i} + // Skip one + x >>= 16 + nextHash = hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i + 2} + // Skip one + x >>= 16 + nextHash = hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i + 4} + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hashLen(x, bTableBits, hashBytes) + prevHash2 := hashLen(x>>8, bTableBits, hashBytes) + e.table[prevHash] = tableEntry{offset: o} + e.table[prevHash2] = tableEntry{offset: o + 1} + currHash := hashLen(x>>16, bTableBits, hashBytes) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { + cv = x >> 24 + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go new file mode 100644 index 00000000..7aa2b72a --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -0,0 +1,241 @@ +package flate + +import "fmt" + +// fastEncL3 +type fastEncL3 struct { + fastGen + table [1 << 16]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *fastEncL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits + hashBytes = 5 + ) + + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + } + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + e.table[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // Skip if too small. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 7 + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, tableBits, hashBytes) + s = nextS + nextS = s + 1 + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash] + now := load6432(src, nextS) + + // Safe offset distance until s + 4... + minOffset := e.cur + s - (maxMatchOffset - 4) + e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} + + // Check both candidates + candidate = candidates.Cur + if candidate.offset < minOffset { + cv = now + // Previous will also be invalid, we have nothing. + continue + } + + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { + break + } + // Both match and are valid, pick longest. + offset := s - (candidate.offset - e.cur) + o2 := s - (candidates.Prev.offset - e.cur) + l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) + if l2 > l1 { + candidate = candidates.Prev + } + break + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + break + } + } + cv = now + } + + // Call emitCopy, and then see if another emitCopy could be our next + // move. 
Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+8) < len(src) && t > 0 { + cv = load6432(src, t) + nextHash := hashLen(cv, tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + t}, + } + } + goto emitRemainder + } + + // Store every 5th hash in-between. + for i := s - l + 2; i < s-5; i += 6 { + nextHash := hashLen(load6432(src, i), tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} + } + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hashLen(x, tableBits, hashBytes) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 2}, + } + x >>= 8 + prevHash = hashLen(x, tableBits, hashBytes) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 1}, + } + x >>= 8 + currHash := hashLen(x, tableBits, hashBytes) + candidates := e.table[currHash] + cv = x + e.table[currHash] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur}, + } + + // Check both candidates + candidate = candidates.Cur + minOffset := e.cur + s - (maxMatchOffset - 4) + + if candidate.offset > minOffset { + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Found a match... + continue + } + candidate = candidates.Prev + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Match at prev... + continue + } + } + cv = x >> 8 + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go new file mode 100644 index 00000000..23c08b32 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -0,0 +1,221 @@ +package flate + +import "fmt" + +type fastEncL4 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntry +} + +func (e *fastEncL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.bTable[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + e.bTable[nextHashL] = entry + + t = lCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { + // We got a long match. Use that. + break + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + lCandidate = e.bTable[hash7(next, tableBits)] + + // If the next long is a candidate, check if we should use that instead... + lOff := nextS - (lCandidate.offset - e.cur) + if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { + l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) + if l2 > l1 { + s = nextS + t = lCandidate.offset - e.cur + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. 
Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic("s-t") + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} + e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // Store every 3rd hash in-between + if true { + i := nextS + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + + i += 3 + for ; i < s-1; i += 3 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + e.bTable[prevHashL] = tableEntry{offset: o} + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go new file mode 100644 index 00000000..1f61ec18 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -0,0 +1,708 @@ +package flate + +import "fmt" + +type fastEncL5 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
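+		// Offsets are rebased so they stay relative to the new e.cur; entries
+		// further back than maxMatchOffset are zeroed, marking them unused.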
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // If the next long is a candidate, use that... 
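+					// The long table keeps the two most recent entries per
+					// hash (Cur and Prev); both are tested below, and a
+					// longer match found at nextS replaces the shorter
+					// match found at s.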
+ t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + if l == 0 { + // Extend the 4-byte match as long as possible. + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end of best match... + if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if t2 >= 0 && off < maxMatchOffset && off > 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + goto emitRemainder + } + + // Store every 3rd hash in-between. + if true { + const hashEvery = 3 + i := s - l + 1 + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // Do an long at i+1 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + eLong = &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // We only have enough bits for a short entry at i+2 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + + // Skip one - otherwise we risk hitting 's' + i += 4 + for ; i < s-1; i += hashEvery { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. 
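+		// A single 64-bit load serves both positions: x hashes index s-1,
+		// and cv = x>>8 seeds the next iteration at s.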
+ x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + eLong := &e.bTable[prevHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} + +// fastEncL5Window is a level 5 encoder, +// but with a custom window size. +type fastEncL5Window struct { + hist []byte + cur int32 + maxOffset int32 + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + maxMatchOffset := e.maxOffset + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
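+	// From here on the loop mirrors fastEncL5.Encode; the only difference
+	// is that maxMatchOffset is the per-instance e.maxOffset shadowed
+	// above rather than the package-level constant.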
+ cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // If the next long is a candidate, use that... + t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + if l == 0 { + // Extend the 4-byte match as long as possible. + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end of best match... + if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. 
+ const skipBeginning = 2 + eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if t2 >= 0 && off < maxMatchOffset && off > 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + goto emitRemainder + } + + // Store every 3rd hash in-between. + if true { + const hashEvery = 3 + i := s - l + 1 + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // Do an long at i+1 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + eLong = &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // We only have enough bits for a short entry at i+2 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + + // Skip one - otherwise we risk hitting 's' + i += 4 + for ; i < s-1; i += hashEvery { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + eLong := &e.bTable[prevHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} + +// Reset the encoding table. +func (e *fastEncL5Window) Reset() { + // We keep the same allocs, since we are compressing the same block sizes. + if cap(e.hist) < allocHistory { + e.hist = make([]byte, 0, allocHistory) + } + + // We offset current position so everything will be out of reach. + // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. 
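+	// Advancing e.cur by maxOffset+len(hist) pushes every stored table
+	// offset out of matching range, so the tables need no clearing here.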
+ if e.cur <= int32(bufferReset) { + e.cur += e.maxOffset + int32(len(e.hist)) + } + e.hist = e.hist[:0] +} + +func (e *fastEncL5Window) addBlock(src []byte) int32 { + // check if we have space already + maxMatchOffset := e.maxOffset + + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < int(maxMatchOffset*2) { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// matchlen will return the match length between offsets and t in src. +// The maximum length returned is maxMatchLength - 4. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 { + if debugDecode { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > e.maxOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:s1], src[t:])) +} + +// matchlenLong will return the match length between offsets and t in src. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 { + if debugDeflate { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > e.maxOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go new file mode 100644 index 00000000..f1e9d98f --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -0,0 +1,325 @@ +package flate + +import "fmt" + +type fastEncL6 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL6) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + // Repeat MUST be > 1 and within range + repeat := int32(1) + for { + const skipLog = 7 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + // Calculate hashes of 'next' + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Long candidate matches at least 4 bytes. + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // Check the previous long candidate as well. + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + // Current value did not match, but check if previous long value does. + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... 
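+				// Level 6 additionally probes the last used match distance
+				// ("repeat") one byte ahead (repOff); structured data often
+				// reuses the same offset, and the check is nearly free.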
+ l = e.matchlen(s+4, t+4, src) + 4 + + // Look up next long candidate (at nextS) + lCandidate = e.bTable[nextHashL] + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // Check repeat at s + repOff + const repOff = 1 + t2 := s - repeat + repOff + if load3232(src, t2) == uint32(cv>>(8*repOff)) { + ml := e.matchlen(s+4+repOff, t2+4, src) + 4 + if ml > l { + t = t2 + l = ml + s += repOff + // Not worth checking more. + break + } + } + + // If the next long is a candidate, use that... + t2 = lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + // This is ok, but check previous as well. + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + if l == 0 { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end-of-match... + if sAt := s + l; sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] + // Test current + t2 := eLong.Cur.offset - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if off < maxMatchOffset { + if off > 0 && t2 >= 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + // Test next: + t2 = eLong.Prev.offset - e.cur - l + skipBeginning + off := s2 - t2 + if off > 0 && off < maxMatchOffset && t2 >= 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if false { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + repeat = s - t + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index after match end. + for i := nextS + 1; i < int32(len(src))-8; i += 2 { + cv := load6432(src, i) + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur + } + goto emitRemainder + } + + // Store every long hash in-between and every second short. 
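+		// Indexing the skipped span keeps the tables populated across long
+		// matches: long hashes are stored for every position, short hashes
+		// for every second one.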
+ if true { + for i := nextS + 1; i < s-1; i += 2 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong2 := &e.bTable[hash7(cv>>8, tableBits)] + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong.Cur, eLong.Prev = t, eLong.Cur + eLong2.Cur, eLong2.Prev = t2, eLong2.Cur + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + cv = load6432(src, s) + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go new file mode 100644 index 00000000..4bd38858 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package flate + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s new file mode 100644 index 00000000..9a7655c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s @@ -0,0 +1,68 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go new file mode 100644 index 00000000..ad5cd814 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+ +package flate + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go new file mode 100644 index 00000000..6ed28061 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go @@ -0,0 +1,37 @@ +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. + + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 7 + reg8SizeMask16 = 15 + reg8SizeMask32 = 31 + reg8SizeMask64 = 63 + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = reg8SizeMask8 + reg16SizeMask16 = reg8SizeMask16 + reg16SizeMask32 = reg8SizeMask32 + reg16SizeMask64 = reg8SizeMask64 + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = reg8SizeMask8 + reg32SizeMask16 = reg8SizeMask16 + reg32SizeMask32 = reg8SizeMask32 + reg32SizeMask64 = reg8SizeMask64 + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = reg8SizeMask8 + reg64SizeMask16 = reg8SizeMask16 + reg64SizeMask32 = reg8SizeMask32 + reg64SizeMask64 = reg8SizeMask64 + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = reg8SizeMask8 + regSizeMaskUint16 = reg8SizeMask16 + regSizeMaskUint32 = reg8SizeMask32 + regSizeMaskUint64 = reg8SizeMask64 +) diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go new file mode 100644 index 00000000..1b7a2cbd --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go @@ -0,0 +1,40 @@ +//go:build !amd64 +// +build !amd64 + +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. 
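+	// In this non-amd64 variant the masks are all-ones no-ops; the amd64
+	// file defines them as width-1 so the compiler can prove the shift
+	// amount is in range and emit a bare shift instruction.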
+ + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 0xff + reg8SizeMask16 = 0xff + reg8SizeMask32 = 0xff + reg8SizeMask64 = 0xff + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = 0xffff + reg16SizeMask16 = 0xffff + reg16SizeMask32 = 0xffff + reg16SizeMask64 = 0xffff + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = 0xffffffff + reg32SizeMask16 = 0xffffffff + reg32SizeMask32 = 0xffffffff + reg32SizeMask64 = 0xffffffff + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = 0xffffffffffffffff + reg64SizeMask16 = 0xffffffffffffffff + reg64SizeMask32 = 0xffffffffffffffff + reg64SizeMask64 = 0xffffffffffffffff + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = ^uint(0) + regSizeMaskUint16 = ^uint(0) + regSizeMaskUint32 = ^uint(0) + regSizeMaskUint64 = ^uint(0) +) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go new file mode 100644 index 00000000..f3d4139e --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -0,0 +1,318 @@ +package flate + +import ( + "io" + "math" + "sync" +) + +const ( + maxStatelessBlock = math.MaxInt16 + // dictionary will be taken from maxStatelessBlock, so limit it. + maxStatelessDict = 8 << 10 + + slTableBits = 13 + slTableSize = 1 << slTableBits + slTableShift = 32 - slTableBits +) + +type statelessWriter struct { + dst io.Writer + closed bool +} + +func (s *statelessWriter) Close() error { + if s.closed { + return nil + } + s.closed = true + // Emit EOF block + return StatelessDeflate(s.dst, nil, true, nil) +} + +func (s *statelessWriter) Write(p []byte) (n int, err error) { + err = StatelessDeflate(s.dst, p, false, nil) + if err != nil { + return 0, err + } + return len(p), nil +} + +func (s *statelessWriter) Reset(w io.Writer) { + s.dst = w + s.closed = false +} + +// NewStatelessWriter will do compression but without maintaining any state +// between Write calls. +// There will be no memory kept between Write calls, +// but compression and speed will be suboptimal. +// Because of this, the size of actual Write calls will affect output size. +func NewStatelessWriter(dst io.Writer) io.WriteCloser { + return &statelessWriter{dst: dst} +} + +// bitWriterPool contains bit writers that can be reused. +var bitWriterPool = sync.Pool{ + New: func() interface{} { + return newHuffmanBitWriter(nil) + }, +} + +// StatelessDeflate allows compressing directly to a Writer without retaining state. +// When returning everything will be flushed. +// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. +// Longer dictionaries will be truncated and will still produce valid output. +// Sending nil dictionary is perfectly fine. +func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { + var dst tokens + bw := bitWriterPool.Get().(*huffmanBitWriter) + bw.reset(out) + defer func() { + // don't keep a reference to our output + bw.reset(nil) + bitWriterPool.Put(bw) + }() + if eof && len(in) == 0 { + // Just write an EOF block. + // Could be faster... + bw.writeStoredHeader(0, true) + bw.flush() + return bw.err + } + + // Truncate dict + if len(dict) > maxStatelessDict { + dict = dict[len(dict)-maxStatelessDict:] + } + + // For subsequent loops, keep shallow dict reference to avoid alloc+copy. 
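+	// Each pass compresses at most maxStatelessBlock bytes; after the
+	// first block, the trailing maxStatelessDict bytes of the previous
+	// input act as the dictionary for the next, referenced without copying.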
+ var inDict []byte + + for len(in) > 0 { + todo := in + if len(inDict) > 0 { + if len(todo) > maxStatelessBlock-maxStatelessDict { + todo = todo[:maxStatelessBlock-maxStatelessDict] + } + } else if len(todo) > maxStatelessBlock-len(dict) { + todo = todo[:maxStatelessBlock-len(dict)] + } + inOrg := in + in = in[len(todo):] + uncompressed := todo + if len(dict) > 0 { + // combine dict and source + bufLen := len(todo) + len(dict) + combined := make([]byte, bufLen) + copy(combined, dict) + copy(combined[len(dict):], todo) + todo = combined + } + // Compress + if len(inDict) == 0 { + statelessEnc(&dst, todo, int16(len(dict))) + } else { + statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) + } + isEof := eof && len(in) == 0 + + if dst.n == 0 { + bw.writeStoredHeader(len(uncompressed), isEof) + if bw.err != nil { + return bw.err + } + bw.writeBytes(uncompressed) + } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 { + // If we removed less than 1/16th, huffman compress the block. + bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) + } else { + bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) + } + if len(in) > 0 { + // Retain a dict if we have more + inDict = inOrg[len(uncompressed)-maxStatelessDict:] + dict = nil + dst.Reset() + } + if bw.err != nil { + return bw.err + } + } + if !eof { + // Align, only a stored block can do that. + bw.writeStoredHeader(0, false) + } + bw.flush() + return bw.err +} + +func hashSL(u uint32) uint32 { + return (u * 0x1e35a7bd) >> slTableShift +} + +func load3216(b []byte, i int16) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6416(b []byte, i int16) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func statelessEnc(dst *tokens, src []byte, startAt int16) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + type tableEntry struct { + offset int16 + } + + var table [slTableSize]tableEntry + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src)-int(startAt) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = 0 + return + } + // Index until startAt + if startAt > 0 { + cv := load3232(src, 0) + for i := int16(0); i < startAt; i++ { + table[hashSL(cv)] = tableEntry{offset: i} + cv = (cv >> 8) | (uint32(src[i+4]) << 24) + } + } + + s := startAt + 1 + nextEmit := startAt + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int16(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
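+	// int16 offsets are sufficient because input here is capped at
+	// maxStatelessBlock (math.MaxInt16), which also keeps the table small.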
+ cv := load3216(src, s) + + for { + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashSL(cv) + candidate = table[nextHash] + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit || nextS <= 0 { + goto emitRemainder + } + + now := load6416(src, nextS) + table[nextHash] = tableEntry{offset: s} + nextHash = hashSL(uint32(now)) + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + + // Do one right away... + cv = uint32(now) + s = nextS + nextS++ + candidate = table[nextHash] + now >>= 8 + table[nextHash] = tableEntry{offset: s} + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + cv = uint32(now) + s = nextS + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset + l := int16(matchLen(src[s+4:], src[t+4:]) + 4) + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + // Save the match found + dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6416(src, s-2) + o := s - 2 + prevHash := hashSL(uint32(x)) + table[prevHash] = tableEntry{offset: o} + x >>= 16 + currHash := hashSL(uint32(x)) + candidate = table[currHash] + table[currHash] = tableEntry{offset: o + 2} + + if uint32(x) != load3216(src, candidate.offset) { + cv = uint32(x >> 8) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go new file mode 100644 index 00000000..d818790c --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -0,0 +1,379 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits + // bits 16-22 offsetcode - 5 bits + // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits + // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits + lengthShift = 22 + offsetMask = 1<maxnumlit + offHist [32]uint16 // offset codes + litHist [256]uint16 // codes 0->255 + nFilled int + n uint16 // Must be able to contain maxStoreBlockSize + tokens [maxStoreBlockSize + 1]token +} + +func (t *tokens) Reset() { + if t.n == 0 { + return + } + t.n = 0 + t.nFilled = 0 + for i := range t.litHist[:] { + t.litHist[i] = 0 + } + for i := range t.extraHist[:] { + t.extraHist[i] = 0 + } + for i := range t.offHist[:] { + t.offHist[i] = 0 + } +} + +func (t *tokens) Fill() { + if t.n == 0 { + return + } + for i, v := range t.litHist[:] { + if v == 0 { + t.litHist[i] = 1 + t.nFilled++ + } + } + for i, v := range t.extraHist[:literalCount-256] { + if v == 0 { + t.nFilled++ + t.extraHist[i] = 1 + } + } + for i, v := range t.offHist[:offsetCodeCount] { + if v == 0 { + t.offHist[i] = 1 + } + } +} + +func indexTokens(in []token) tokens { + var t tokens + t.indexTokens(in) + return t +} + +func (t *tokens) indexTokens(in []token) { + t.Reset() + for _, tok := range in { + if tok < matchType { + t.AddLiteral(tok.literal()) + continue + } + t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask) + } +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst *tokens, lit []byte) { + for _, v := range lit { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } +} + +func (t *tokens) AddLiteral(lit byte) { + t.tokens[t.n] = token(lit) + t.litHist[lit]++ + t.n++ +} + +// from https://stackoverflow.com/a/28730362 +func mFastLog2(val float32) float32 { + ux := int32(math.Float32bits(val)) + log2 := (float32)(((ux >> 23) & 255) - 128) + ux &= -0x7f800001 + ux += 127 << 23 + uval := math.Float32frombits(uint32(ux)) + log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759 + return log2 +} + +// EstimatedBits will return an minimum size estimated by an *optimal* +// compression of the block. +// The size of the block +func (t *tokens) EstimatedBits() int { + shannon := float32(0) + bits := int(0) + nMatches := 0 + total := int(t.n) + t.nFilled + if total > 0 { + invTotal := 1.0 / float32(total) + for _, v := range t.litHist[:] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + } + } + // Just add 15 for EOB + shannon += 15 + for i, v := range t.extraHist[1 : literalCount-256] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + bits += int(lengthExtraBits[i&31]) * int(v) + nMatches += int(v) + } + } + } + if nMatches > 0 { + invTotal := 1.0 / float32(nMatches) + for i, v := range t.offHist[:offsetCodeCount] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + bits += int(offsetExtraBits[i&31]) * int(v) + } + } + } + return int(shannon) + bits +} + +// AddMatch adds a match to the tokens. +// This function is very sensitive to inlining and right on the border. 
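+// A match becomes a single 32-bit token: the biased length in bits 22-30,
+// the 5-bit offset code in bits 16-22, and the biased offset in the low
+// 16 bits, matching the layout documented at the top of this file.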
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { + if debugDeflate { + if xlength >= maxMatchLength+baseMatchLength { + panic(fmt.Errorf("invalid length: %v", xlength)) + } + if xoffset >= maxMatchOffset+baseMatchOffset { + panic(fmt.Errorf("invalid offset: %v", xoffset)) + } + } + oCode := offsetCode(xoffset) + xoffset |= oCode << 16 + + t.extraHist[lengthCodes1[uint8(xlength)]]++ + t.offHist[oCode&31]++ + t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset { + panic(fmt.Errorf("invalid offset: %v", xoffset)) + } + } + oc := offsetCode(xoffset) + xoffset |= oc << 16 + for xlength > 0 { + xl := xlength + if xl > 258 { + // We need to have at least baseMatchLength left over for next loop. + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } + } + xlength -= xl + xl -= baseMatchLength + t.extraHist[lengthCodes1[uint8(xl)]]++ + t.offHist[oc&31]++ + t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) } + +// Convert length to code. +func lengthCode(len uint8) uint8 { return lengthCodes[len] } + +// Returns the offset code corresponding to a specific offset +func offsetCode(off uint32) uint32 { + if false { + if off < uint32(len(offsetCodes)) { + return offsetCodes[off&255] + } else if off>>7 < uint32(len(offsetCodes)) { + return offsetCodes[(off>>7)&255] + 14 + } else { + return offsetCodes[(off>>14)&255] + 28 + } + } + if off < uint32(len(offsetCodes)) { + return offsetCodes[uint8(off)] + } + return offsetCodes14[uint8(off>>7)] +} diff --git a/vendor/github.com/nats-io/nats.go/.gitignore b/vendor/github.com/nats-io/nats.go/.gitignore new file mode 100644 index 00000000..ae4871f4 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/.gitignore @@ -0,0 +1,45 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# Emacs +*~ +\#*\# +.\#* + +# vi/vim +.??*.swp + +# Mac +.DS_Store + +# Eclipse +.project +.settings/ + +# bin + +# Goland +.idea + +# VS Code +.vscode \ No newline at end of file diff --git a/vendor/github.com/nats-io/nats.go/.golangci.yaml b/vendor/github.com/nats-io/nats.go/.golangci.yaml new file mode 100644 index 00000000..be66189e --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/.golangci.yaml @@ -0,0 +1,13 @@ +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + exclude-rules: + - linters: + - errcheck + text: "Unsubscribe" + - linters: + - errcheck + text: "msg.Ack" + - linters: + - errcheck + text: "watcher.Stop" diff --git a/vendor/github.com/nats-io/nats.go/.travis.yml b/vendor/github.com/nats-io/nats.go/.travis.yml new file mode 100644 index 00000000..1505f773 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/.travis.yml @@ -0,0 +1,36 @@ +language: go +go: +- "1.21.x" +- "1.20.x" +go_import_path: github.com/nats-io/nats.go +install: +- go get -t ./... +- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin +- if [[ "$TRAVIS_GO_VERSION" =~ 1.21 ]]; then + go install github.com/mattn/goveralls@latest; + go install github.com/wadey/gocovmerge@latest; + go install honnef.co/go/tools/cmd/staticcheck@latest; + go install github.com/client9/misspell/cmd/misspell@latest; + fi +before_script: +- $(exit $(go fmt ./... | wc -l)) +- go vet -modfile=go_test.mod ./... 
+- if [[ "$TRAVIS_GO_VERSION" =~ 1.21 ]]; then + find . -type f -name "*.go" | xargs misspell -error -locale US; + GOFLAGS="-mod=mod -modfile=go_test.mod" staticcheck ./...; + fi +- golangci-lint run ./jetstream/... +script: +- go test -modfile=go_test.mod -v -run=TestNoRace -p=1 ./... --failfast -vet=off +- if [[ "$TRAVIS_GO_VERSION" =~ 1.21 ]]; then ./scripts/cov.sh TRAVIS; else go test -modfile=go_test.mod -race -v -p=1 ./... --failfast -vet=off -tags=internal_testing; fi +after_success: +- if [[ "$TRAVIS_GO_VERSION" =~ 1.21 ]]; then $HOME/gopath/bin/goveralls -coverprofile=acc.out -service travis-ci; fi + +jobs: + include: + - name: "Go: 1.21.x (nats-server@main)" + go: "1.21.x" + before_script: + - go get -modfile go_test.mod github.com/nats-io/nats-server/v2@main + allow_failures: + - name: "Go: 1.21.x (nats-server@main)" diff --git a/vendor/github.com/nats-io/nats.go/.words b/vendor/github.com/nats-io/nats.go/.words new file mode 100644 index 00000000..24be7f62 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/.words @@ -0,0 +1,106 @@ +1 + +derek +dlc +ivan + +acknowledgement/SM +arity +deduplication/S +demarshal/SDG +durables +iff +observable/S +redelivery/S +retransmitting +retry/SB + +SlowConsumer + +AppendInt +ReadMIMEHeader + +clientProtoZero +jetstream +v1 +v2 + +ack/SGD +auth +authToken +chans +creds +config/S +cseq +impl +msgh +msgId +mux/S +nack +ptr +puback +scanf +stderr +stdout +structs +tm +todo +unsub/S + +permessage +permessage-deflate +urlA +urlB +websocket +ws +wss + +NKey +pList + +backend/S +backoff/S +decompressor/CGS +inflight +inlined +lookups +reconnection/MS +redeliver/ADGS +responder/S +rewrap/S +rollup/S +unreceive/DRSZGB +variadic +wakeup/S +whitespace +wrap/AS + +omitempty + +apache +html +ietf +www + +sum256 +32bit/S +64bit/S +64k +128k +512k + +hacky +handroll/D + +rfc6455 +rfc7692 +0x00 +0xff +20x +40x +50x + +ErrXXX + +atlanta +eu diff --git a/vendor/github.com/nats-io/nats.go/.words.readme b/vendor/github.com/nats-io/nats.go/.words.readme new file mode 100644 index 00000000..9d9f5cbb --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/.words.readme @@ -0,0 +1,25 @@ +The .words file is used by gospel (v1.2+), which wraps the Hunspell libraries +but populates the dictionary with identifiers from the Go source. + + + +Alas, no comments are allowed in the .words file and newer versions of gospel +error out on seeing them. This is really a hunspell restriction. + +We assume en_US hunspell dictionaries are installed and used. +The /AFFIXRULES are defined in en_US.aff (eg: /usr/share/hunspell/en_US.aff) +Invoke `hunspell -D` to see the actual locations. + +Words which are in the base dictionary can't have extra affix rules added to +them, so we have to start with the affixed variant we want to add. +Thus `creds` rather than `cred/S` and so on. + +So we can't use receive/DRSZGBU, adding 'U', to allow unreceive and variants, +we have to use unreceive as the stem. + +We can't define our own affix or compound rules, +to capture rfc\d{3,} or 0x[0-9A-Fa-f]{2} + +The spelling tokenizer doesn't take "permessage-deflate" as allowing for ... +"permessage-deflate", which is an RFC7692 registered extension for websockets. +We have to explicitly list "permessage". 
diff --git a/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md b/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md new file mode 100644 index 00000000..b850d49e --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +NATS follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/github.com/nats-io/nats.go/GOVERNANCE.md b/vendor/github.com/nats-io/nats.go/GOVERNANCE.md new file mode 100644 index 00000000..1d5a7be3 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/GOVERNANCE.md @@ -0,0 +1,3 @@ +# NATS Go Client Governance + +NATS Go Client (go-nats) is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md). \ No newline at end of file diff --git a/vendor/github.com/nats-io/nats.go/LICENSE b/vendor/github.com/nats-io/nats.go/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/nats-io/nats.go/MAINTAINERS.md b/vendor/github.com/nats-io/nats.go/MAINTAINERS.md new file mode 100644 index 00000000..23214655 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/MAINTAINERS.md @@ -0,0 +1,8 @@ +# Maintainers + +Maintainership is on a per project basis. + +### Maintainers + - Derek Collison [@derekcollison](https://github.com/derekcollison) + - Ivan Kozlovic [@kozlovic](https://github.com/kozlovic) + - Waldemar Quevedo [@wallyqs](https://github.com/wallyqs) diff --git a/vendor/github.com/nats-io/nats.go/README.md b/vendor/github.com/nats-io/nats.go/README.md new file mode 100644 index 00000000..042733da --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/README.md @@ -0,0 +1,480 @@ +# NATS - Go Client +A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io). 
+ +[![License Apache 2][License-Image]][License-Url] [![Go Report Card][ReportCard-Image]][ReportCard-Url] [![Build Status][Build-Status-Image]][Build-Status-Url] [![GoDoc][GoDoc-Image]][GoDoc-Url] [![Coverage Status][Coverage-image]][Coverage-Url] + +[License-Url]: https://www.apache.org/licenses/LICENSE-2.0 +[License-Image]: https://img.shields.io/badge/License-Apache2-blue.svg +[ReportCard-Url]: https://goreportcard.com/report/github.com/nats-io/nats.go +[ReportCard-Image]: https://goreportcard.com/badge/github.com/nats-io/nats.go +[Build-Status-Url]: https://travis-ci.com/github/nats-io/nats.go +[Build-Status-Image]: https://travis-ci.com/nats-io/nats.go.svg?branch=main +[GoDoc-Url]: https://pkg.go.dev/github.com/nats-io/nats.go +[GoDoc-Image]: https://img.shields.io/badge/GoDoc-reference-007d9c +[Coverage-Url]: https://coveralls.io/r/nats-io/nats.go?branch=main +[Coverage-image]: https://coveralls.io/repos/github/nats-io/nats.go/badge.svg?branch=main + +## Installation + +```bash +# Go client +go get github.com/nats-io/nats.go/ + +# Server +go get github.com/nats-io/nats-server +``` + +When using or transitioning to Go modules support: + +```bash +# Go client latest or explicit version +go get github.com/nats-io/nats.go/@latest +go get github.com/nats-io/nats.go/@v1.31.0 + +# For latest NATS Server, add /v2 at the end +go get github.com/nats-io/nats-server/v2 + +# NATS Server v1 is installed otherwise +# go get github.com/nats-io/nats-server +``` + +## Basic Usage + +```go +import "github.com/nats-io/nats.go" + +// Connect to a server +nc, _ := nats.Connect(nats.DefaultURL) + +// Simple Publisher +nc.Publish("foo", []byte("Hello World")) + +// Simple Async Subscriber +nc.Subscribe("foo", func(m *nats.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}) + +// Responding to a request message +nc.Subscribe("request", func(m *nats.Msg) { + m.Respond([]byte("answer is 42")) +}) + +// Simple Sync Subscriber +sub, err := nc.SubscribeSync("foo") +m, err := sub.NextMsg(timeout) + +// Channel Subscriber +ch := make(chan *nats.Msg, 64) +sub, err := nc.ChanSubscribe("foo", ch) +msg := <- ch + +// Unsubscribe +sub.Unsubscribe() + +// Drain +sub.Drain() + +// Requests +msg, err := nc.Request("help", []byte("help me"), 10*time.Millisecond) + +// Replies +nc.Subscribe("help", func(m *nats.Msg) { + nc.Publish(m.Reply, []byte("I can help!")) +}) + +// Drain connection (Preferred for responders) +// Close() not needed if this is called. +nc.Drain() + +// Close connection +nc.Close() +``` + +## JetStream + +JetStream is the built-in NATS persistence system. `nats.go` provides a built-in +API enabling both managing JetStream assets as well as publishing/consuming +persistent messages. 
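+
+The stream and consumer looked up in the basic example below must already
+exist. A minimal sketch of creating them with the same `jetstream` package
+(the stream name "foo", subject filter "foo.>", and consumer name "cons"
+simply match the lookup calls in the example):
+
+```go
+nc, _ := nats.Connect(nats.DefaultURL)
+js, _ := jetstream.New(nc)
+
+ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+defer cancel()
+
+// create a stream that captures all messages published on "foo.>"
+stream, _ := js.CreateStream(ctx, jetstream.StreamConfig{
+    Name:     "foo",
+    Subjects: []string{"foo.>"},
+})
+
+// create (or update) the durable consumer the example reads from
+_, _ = stream.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{
+    Durable: "cons",
+})
+```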
+
+### Basic usage
+
+```go
+// connect to nats server
+nc, _ := nats.Connect(nats.DefaultURL)
+
+// create jetstream context from nats connection
+js, _ := jetstream.New(nc)
+
+ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+defer cancel()
+
+// get existing stream handle
+stream, _ := js.Stream(ctx, "foo")
+
+// retrieve consumer handle from a stream
+cons, _ := stream.Consumer(ctx, "cons")
+
+// consume messages from the consumer in callback
+cc, _ := cons.Consume(func(msg jetstream.Msg) {
+    fmt.Println("Received jetstream message: ", string(msg.Data()))
+    msg.Ack()
+})
+defer cc.Stop()
+```
+
+To find more information on the `nats.go` JetStream API, visit
+[`jetstream/README.md`](jetstream/README.md).
+
+> The current JetStream API replaces the [legacy JetStream API](legacy_jetstream.md)
+
+## Service API
+
+The service API (`micro`) allows you to [easily build NATS services](micro/README.md).
+The service API is currently in beta release.
+
+## Encoded Connections
+
+```go
+
+nc, _ := nats.Connect(nats.DefaultURL)
+c, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
+defer c.Close()
+
+// Simple Publisher
+c.Publish("foo", "Hello World")
+
+// Simple Async Subscriber
+c.Subscribe("foo", func(s string) {
+    fmt.Printf("Received a message: %s\n", s)
+})
+
+// EncodedConn can Publish any raw Go type using the registered Encoder
+type person struct {
+    Name    string
+    Address string
+    Age     int
+}
+
+// Go type Subscriber
+c.Subscribe("hello", func(p *person) {
+    fmt.Printf("Received a person: %+v\n", p)
+})
+
+me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street, San Francisco, CA"}
+
+// Go type Publisher
+c.Publish("hello", me)
+
+// Unsubscribe
+sub, err := c.Subscribe("foo", func(s string) {})
+// ...
+sub.Unsubscribe()
+
+// Requests
+var response string
+err = c.Request("help", "help me", &response, 10*time.Millisecond)
+if err != nil {
+    fmt.Printf("Request failed: %v\n", err)
+}
+
+// Replying
+c.Subscribe("help", func(subj, reply string, msg string) {
+    c.Publish(reply, "I can help!")
+})
+
+// Close connection
+c.Close()
+```
+
+## New Authentication (Nkeys and User Credentials)
+This requires a server with version >= 2.0.0
+
+NATS servers have a new security and authentication mechanism to authenticate with user credentials and Nkeys.
+The simplest form is to use the helper method UserCredentials(credsFilepath).
+```go
+nc, err := nats.Connect(url, nats.UserCredentials("user.creds"))
+```
+
+The helper method creates two callback handlers to present the user JWT and sign the nonce challenge from the server.
+The core client library never has direct access to your private key and simply performs the callback for signing the server challenge.
+The helper loads the credentials, and wipes and erases the memory it uses, on each connect or reconnect.
+
+The helper can also take two entries, one for the JWT and one for the NKey seed file.
+```go
+nc, err := nats.Connect(url, nats.UserCredentials("user.jwt", "user.nk"))
+```
+
+You can also set the callback handlers yourself and manage challenge signing directly.
+```go
+nc, err := nats.Connect(url, nats.UserJWT(jwtCB, sigCB))
+```
+
+Bare Nkeys are also supported. The nkey seed should be in a read-only file, e.g. seed.txt:
+```bash
+> cat seed.txt
+# This is my seed nkey!
+SUAGMJH5XLGZKQQWAWKRZJIGMOU4HPFUYLXJMXOO5NLFEO2OOQJ5LPRDPM
+```
+
+A helper function will load and decode the seed and perform the proper signing of the server nonce.
+It clears memory in between invocations.
+You can also choose the low-level option and provide the public key and a signature callback on your own.
+
+```go
+opt, err := nats.NkeyOptionFromSeed("seed.txt")
+nc, err := nats.Connect(serverUrl, opt)
+
+// Direct
+nc, err := nats.Connect(serverUrl, nats.Nkey(pubNkey, sigCB))
+```
+
+## TLS
+
+```go
+// tls as a scheme will enable secure connections by default. This will also verify the server name.
+nc, err := nats.Connect("tls://nats.demo.io:4443")
+
+// If you are using a self-signed certificate, you need to have a tls.Config with RootCAs setup.
+// We provide a helper method to make this case easier.
+nc, err = nats.Connect("tls://localhost:4443", nats.RootCAs("./configs/certs/ca.pem"))
+
+// If the server requires a client certificate, there is a helper function for that too:
+cert := nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem")
+nc, err = nats.Connect("tls://localhost:4443", cert)
+
+// You can also supply a complete tls.Config
+
+certFile := "./configs/certs/client-cert.pem"
+keyFile := "./configs/certs/client-key.pem"
+cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+if err != nil {
+    t.Fatalf("error parsing X509 certificate/key pair: %v", err)
+}
+
+config := &tls.Config{
+    ServerName:   opts.Host,
+    Certificates: []tls.Certificate{cert},
+    RootCAs:      pool,
+    MinVersion:   tls.VersionTLS12,
+}
+
+nc, err = nats.Connect("nats://localhost:4443", nats.Secure(config))
+if err != nil {
+    t.Fatalf("Got an error on Connect with Secure Options: %+v\n", err)
+}
+
+```
+
+## Using Go Channels (netchan)
+
+```go
+nc, _ := nats.Connect(nats.DefaultURL)
+ec, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
+defer ec.Close()
+
+type person struct {
+    Name    string
+    Address string
+    Age     int
+}
+
+recvCh := make(chan *person)
+ec.BindRecvChan("hello", recvCh)
+
+sendCh := make(chan *person)
+ec.BindSendChan("hello", sendCh)
+
+me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street"}
+
+// Send via Go channels
+sendCh <- me
+
+// Receive via Go channels
+who := <-recvCh
+```
+
+## Wildcard Subscriptions
+
+```go
+
+// "*" matches any token, at any level of the subject.
+nc.Subscribe("foo.*.baz", func(m *nats.Msg) {
+    fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data))
+})
+
+nc.Subscribe("foo.bar.*", func(m *nats.Msg) {
+    fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data))
+})
+
+// ">" matches any length of the tail of a subject, and can only be the last token.
+// E.g. 'foo.>' will match 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22'
+nc.Subscribe("foo.>", func(m *nats.Msg) {
+    fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data))
+})
+
+// Matches all of the above
+nc.Publish("foo.bar.baz", []byte("Hello World"))
+
+```
+
+## Queue Groups
+
+```go
+// All subscriptions with the same queue name will form a queue group.
+// Each message will be delivered to only one subscriber per queue group,
+// using queuing semantics. You can have as many queue groups as you wish.
+// Normal subscribers will continue to work as expected.
+
+nc.QueueSubscribe("foo", "job_workers", func(_ *nats.Msg) {
+    received += 1
+})
+```
+
+## Advanced Usage
+
+```go
+
+// Normally, the library will return an error when trying to connect and
+// there is no server running. The RetryOnFailedConnect option will set
+// the connection in reconnecting state if it failed to connect right away.
+nc, err := nats.Connect(nats.DefaultURL,
+    nats.RetryOnFailedConnect(true),
+    nats.MaxReconnects(10),
+    nats.ReconnectWait(time.Second),
+    nats.ReconnectHandler(func(_ *nats.Conn) {
+        // Note that this will be invoked for the first asynchronous connect.
+    }))
+if err != nil {
+    // Should not return an error even if it can't connect, but you still
+    // need to check in case there are some configuration errors.
+}
+
+// Flush connection to server; returns when all messages have been processed.
+nc.Flush()
+fmt.Println("All clear!")
+
+// FlushTimeout specifies a timeout value as well.
+err := nc.FlushTimeout(1*time.Second)
+if err != nil {
+    fmt.Println("Flush timed out!")
+} else {
+    fmt.Println("All clear!")
+}
+
+// Auto-unsubscribe after MAX_WANTED messages received
+const MAX_WANTED = 10
+sub, err := nc.Subscribe("foo", func(m *nats.Msg) {
+    // process the message
+})
+sub.AutoUnsubscribe(MAX_WANTED)
+
+// Multiple connections
+nc1, _ := nats.Connect("nats://host1:4222")
+nc2, _ := nats.Connect("nats://host2:4222")
+
+nc1.Subscribe("foo", func(m *nats.Msg) {
+    fmt.Printf("Received a message: %s\n", string(m.Data))
+})
+
+nc2.Publish("foo", []byte("Hello World!"))
+
+```
+
+## Clustered Usage
+
+```go
+
+var servers = "nats://localhost:1222, nats://localhost:1223, nats://localhost:1224"
+
+nc, err := nats.Connect(servers)
+
+// Optionally set ReconnectWait and MaxReconnect attempts.
+// This example means 10 seconds total per backend.
+nc, err = nats.Connect(servers, nats.MaxReconnects(5), nats.ReconnectWait(2*time.Second))
+
+// You can also add some jitter for the reconnection.
+// This call will add up to 500 milliseconds for non TLS connections and 2 seconds for TLS connections.
+// If not specified, the library defaults to 100 milliseconds and 1 second, respectively.
+nc, err = nats.Connect(servers, nats.ReconnectJitter(500*time.Millisecond, 2*time.Second))
+
+// You can also specify a custom reconnect delay handler. If set, the library will invoke it when it has tried
+// all URLs in its list. The value returned will be used as the total sleep time, so add your own jitter.
+// The library will pass the number of times it went through the whole list.
+nc, err = nats.Connect(servers, nats.CustomReconnectDelay(func(attempts int) time.Duration {
+    return someBackoffFunction(attempts)
+}))
+
+// Optionally disable randomization of the server pool
+nc, err = nats.Connect(servers, nats.DontRandomize())
+
+// Set up callbacks to be notified on disconnects, reconnects and connection closed.
+nc, err = nats.Connect(servers,
+    nats.DisconnectErrHandler(func(nc *nats.Conn, err error) {
+        fmt.Printf("Got disconnected! Reason: %q\n", err)
+    }),
+    nats.ReconnectHandler(func(nc *nats.Conn) {
+        fmt.Printf("Got reconnected to %v!\n", nc.ConnectedUrl())
+    }),
+    nats.ClosedHandler(func(nc *nats.Conn) {
+        fmt.Printf("Connection closed. Reason: %q\n", nc.LastError())
+    })
+)
+
+// When connecting to a mesh of servers with auto-discovery capabilities,
+// you may need to provide a username/password or token in order to connect
+// to any server in that mesh when authentication is required.
+// Instead of providing the credentials in the initial URL, you will use
+// new option setters:
+nc, err = nats.Connect("nats://localhost:4222", nats.UserInfo("foo", "bar"))
+
+// For token based authentication:
+nc, err = nats.Connect("nats://localhost:4222", nats.Token("S3cretT0ken"))
+
+// You can even pass the two at the same time in case one of the servers
+// in the mesh requires a token instead of a user name and password.
+nc, err = nats.Connect("nats://localhost:4222",
+    nats.UserInfo("foo", "bar"),
+    nats.Token("S3cretT0ken"))
+
+// Note that if credentials are specified in the initial URLs, they take
+// precedence over the credentials specified through the options.
+// For instance, in the connect call below, the client library will use
+// the user "my" and password "pwd" to connect to localhost:4222, however,
+// it will use username "foo" and password "bar" when (re)connecting to
+// a different server URL that it got as part of the auto-discovery.
+nc, err = nats.Connect("nats://my:pwd@localhost:4222", nats.UserInfo("foo", "bar"))
+
+```
+
+## Context support (Go 1.7+)
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+defer cancel()
+
+nc, err := nats.Connect(nats.DefaultURL)
+
+// Request with context
+msg, err := nc.RequestWithContext(ctx, "foo", []byte("bar"))
+
+// Synchronous subscriber with context
+sub, err := nc.SubscribeSync("foo")
+msg, err := sub.NextMsgWithContext(ctx)
+
+// Encoded Request with context
+c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
+type request struct {
+    Message string `json:"message"`
+}
+type response struct {
+    Code int `json:"code"`
+}
+req := &request{Message: "Hello"}
+resp := &response{}
+err := c.RequestWithContext(ctx, "foo", req, resp)
+```
+
+## License
+
+Unless otherwise noted, the NATS source files are distributed
+under the Apache Version 2.0 license found in the LICENSE file.
+
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats?ref=badge_large)
diff --git a/vendor/github.com/nats-io/nats.go/context.go b/vendor/github.com/nats-io/nats.go/context.go
new file mode 100644
index 00000000..c4ef4be1
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/context.go
@@ -0,0 +1,244 @@
+// Copyright 2016-2022 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nats
+
+import (
+	"context"
+	"reflect"
+)
+
+// RequestMsgWithContext takes a context and a Msg, and sends a request
+// expecting a single response.
+func (nc *Conn) RequestMsgWithContext(ctx context.Context, msg *Msg) (*Msg, error) {
+	if msg == nil {
+		return nil, ErrInvalidMsg
+	}
+	hdr, err := msg.headerBytes()
+	if err != nil {
+		return nil, err
+	}
+	return nc.requestWithContext(ctx, msg.Subject, hdr, msg.Data)
+}
+
+// RequestWithContext takes a context, a subject and a payload
+// in bytes, and sends a request expecting a single response.
+func (nc *Conn) RequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) {
+	return nc.requestWithContext(ctx, subj, nil, data)
+}
+
+func (nc *Conn) requestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) {
+	if ctx == nil {
+		return nil, ErrInvalidContext
+	}
+	if nc == nil {
+		return nil, ErrInvalidConnection
+	}
+	// Check whether the context is done already before making
+	// the request.
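+	// (If it is, ctx.Err() returns a non-nil error, either context.Canceled
+	// or context.DeadlineExceeded, which is handed back to the caller.)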
+ if ctx.Err() != nil { + return nil, ctx.Err() + } + + var m *Msg + var err error + + // If user wants the old style. + if nc.useOldRequestStyle() { + m, err = nc.oldRequestWithContext(ctx, subj, hdr, data) + } else { + mch, token, err := nc.createNewRequestAndSend(subj, hdr, data) + if err != nil { + return nil, err + } + + var ok bool + + select { + case m, ok = <-mch: + if !ok { + return nil, ErrConnectionClosed + } + case <-ctx.Done(): + nc.mu.Lock() + delete(nc.respMap, token) + nc.mu.Unlock() + return nil, ctx.Err() + } + } + // Check for no responder status. + if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { + m, err = nil, ErrNoResponders + } + return m, err +} + +// oldRequestWithContext utilizes inbox and subscription per request. +func (nc *Conn) oldRequestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) { + inbox := nc.NewInbox() + ch := make(chan *Msg, RequestChanLen) + + s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil) + if err != nil { + return nil, err + } + s.AutoUnsubscribe(1) + defer s.Unsubscribe() + + err = nc.publish(subj, inbox, hdr, data) + if err != nil { + return nil, err + } + + return s.NextMsgWithContext(ctx) +} + +func (s *Subscription) nextMsgWithContext(ctx context.Context, pullSubInternal, waitIfNoMsg bool) (*Msg, error) { + if ctx == nil { + return nil, ErrInvalidContext + } + if s == nil { + return nil, ErrBadSubscription + } + if ctx.Err() != nil { + return nil, ctx.Err() + } + + s.mu.Lock() + err := s.validateNextMsgState(pullSubInternal) + if err != nil { + s.mu.Unlock() + return nil, err + } + + // snapshot + mch := s.mch + s.mu.Unlock() + + var ok bool + var msg *Msg + + // If something is available right away, let's optimize that case. + select { + case msg, ok = <-mch: + if !ok { + return nil, s.getNextMsgErr() + } + if err := s.processNextMsgDelivered(msg); err != nil { + return nil, err + } + return msg, nil + default: + // If internal and we don't want to wait, signal that there is no + // message in the internal queue. + if pullSubInternal && !waitIfNoMsg { + return nil, errNoMessages + } + } + + select { + case msg, ok = <-mch: + if !ok { + return nil, s.getNextMsgErr() + } + if err := s.processNextMsgDelivered(msg); err != nil { + return nil, err + } + case <-ctx.Done(): + return nil, ctx.Err() + } + + return msg, nil +} + +// NextMsgWithContext takes a context and returns the next message +// available to a synchronous subscriber, blocking until it is delivered +// or context gets canceled. +func (s *Subscription) NextMsgWithContext(ctx context.Context) (*Msg, error) { + return s.nextMsgWithContext(ctx, false, true) +} + +// FlushWithContext will allow a context to control the duration +// of a Flush() call. This context should be non-nil and should +// have a deadline set. We will return an error if none is present. 
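+//
+// A minimal usage sketch (the one-second deadline is arbitrary):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	if err := nc.FlushWithContext(ctx); err != nil {
+//		// flush did not complete before the deadline
+//	}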
+func (nc *Conn) FlushWithContext(ctx context.Context) error {
+	if nc == nil {
+		return ErrInvalidConnection
+	}
+	if ctx == nil {
+		return ErrInvalidContext
+	}
+	_, ok := ctx.Deadline()
+	if !ok {
+		return ErrNoDeadlineContext
+	}
+
+	nc.mu.Lock()
+	if nc.isClosed() {
+		nc.mu.Unlock()
+		return ErrConnectionClosed
+	}
+	// Create a buffered channel to prevent chan send to block
+	// in processPong()
+	ch := make(chan struct{}, 1)
+	nc.sendPing(ch)
+	nc.mu.Unlock()
+
+	var err error
+
+	select {
+	case _, ok := <-ch:
+		if !ok {
+			err = ErrConnectionClosed
+		} else {
+			close(ch)
+		}
+	case <-ctx.Done():
+		err = ctx.Err()
+	}
+
+	if err != nil {
+		nc.removeFlushEntry(ch)
+	}
+
+	return err
+}
+
+// RequestWithContext will create an Inbox and perform a Request
+// using the provided cancellation context with the Inbox reply
+// for the data v. A response will be decoded into the last parameter, vPtr.
+func (c *EncodedConn) RequestWithContext(ctx context.Context, subject string, v any, vPtr any) error {
+	if ctx == nil {
+		return ErrInvalidContext
+	}
+
+	b, err := c.Enc.Encode(subject, v)
+	if err != nil {
+		return err
+	}
+	m, err := c.Conn.RequestWithContext(ctx, subject, b)
+	if err != nil {
+		return err
+	}
+	if reflect.TypeOf(vPtr) == emptyMsgType {
+		mPtr := vPtr.(*Msg)
+		*mPtr = *m
+	} else {
+		err := c.Enc.Decode(m.Subject, m.Data, vPtr)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/nats-io/nats.go/dependencies.md b/vendor/github.com/nats-io/nats.go/dependencies.md
new file mode 100644
index 00000000..ec9ab3c6
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/dependencies.md
@@ -0,0 +1,15 @@
+# External Dependencies
+
+This file lists the dependencies used in this repository.
+
+| Dependency                        | License      |
+|-----------------------------------|--------------|
+| Go                                | BSD 3-Clause |
+| github.com/golang/protobuf/proto  | BSD-3-Clause |
+| github.com/klauspost/compress     | BSD-3-Clause |
+| github.com/nats-io/nats-server/v2 | Apache-2.0   |
+| github.com/nats-io/nkeys          | Apache-2.0   |
+| github.com/nats-io/nuid           | Apache-2.0   |
+| go.uber.org/goleak                | MIT          |
+| golang.org/x/text                 | BSD-3-Clause |
+| google.golang.org/protobuf        | BSD-3-Clause |
diff --git a/vendor/github.com/nats-io/nats.go/enc.go b/vendor/github.com/nats-io/nats.go/enc.go
new file mode 100644
index 00000000..a1c54f24
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/enc.go
@@ -0,0 +1,269 @@
+// Copyright 2012-2019 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nats
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+	"time"
+
+	// Default Encoders
+	"github.com/nats-io/nats.go/encoders/builtin"
+)
+
+// Encoder interface is for all registered encoders
+type Encoder interface {
+	Encode(subject string, v any) ([]byte, error)
+	Decode(subject string, data []byte, vPtr any) error
+}
+
+var encMap map[string]Encoder
+var encLock sync.Mutex
+
+// Indexed names into the Registered Encoders.
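+// These are the keys under which init() below registers the builtin encoders.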
+const ( + JSON_ENCODER = "json" + GOB_ENCODER = "gob" + DEFAULT_ENCODER = "default" +) + +func init() { + encMap = make(map[string]Encoder) + // Register json, gob and default encoder + RegisterEncoder(JSON_ENCODER, &builtin.JsonEncoder{}) + RegisterEncoder(GOB_ENCODER, &builtin.GobEncoder{}) + RegisterEncoder(DEFAULT_ENCODER, &builtin.DefaultEncoder{}) +} + +// EncodedConn are the preferred way to interface with NATS. They wrap a bare connection to +// a nats server and have an extendable encoder system that will encode and decode messages +// from raw Go types. +type EncodedConn struct { + Conn *Conn + Enc Encoder +} + +// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered +// encoder. +func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) { + if c == nil { + return nil, errors.New("nats: Nil Connection") + } + if c.IsClosed() { + return nil, ErrConnectionClosed + } + ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)} + if ec.Enc == nil { + return nil, fmt.Errorf("no encoder registered for '%s'", encType) + } + return ec, nil +} + +// RegisterEncoder will register the encType with the given Encoder. Useful for customization. +func RegisterEncoder(encType string, enc Encoder) { + encLock.Lock() + defer encLock.Unlock() + encMap[encType] = enc +} + +// EncoderForType will return the registered Encoder for the encType. +func EncoderForType(encType string) Encoder { + encLock.Lock() + defer encLock.Unlock() + return encMap[encType] +} + +// Publish publishes the data argument to the given subject. The data argument +// will be encoded using the associated encoder. +func (c *EncodedConn) Publish(subject string, v any) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + return c.Conn.publish(subject, _EMPTY_, nil, b) +} + +// PublishRequest will perform a Publish() expecting a response on the +// reply subject. Use Request() for automatically waiting for a response +// inline. +func (c *EncodedConn) PublishRequest(subject, reply string, v any) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + return c.Conn.publish(subject, reply, nil, b) +} + +// Request will create an Inbox and perform a Request() call +// with the Inbox reply for the data v. A response will be +// decoded into the vPtr Response. +func (c *EncodedConn) Request(subject string, v any, vPtr any, timeout time.Duration) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + m, err := c.Conn.Request(subject, b, timeout) + if err != nil { + return err + } + if reflect.TypeOf(vPtr) == emptyMsgType { + mPtr := vPtr.(*Msg) + *mPtr = *m + } else { + err = c.Enc.Decode(m.Subject, m.Data, vPtr) + } + return err +} + +// Handler is a specific callback used for Subscribe. It is generalized to +// an any, but we will discover its format and arguments at runtime +// and perform the correct callback, including demarshaling encoded data +// back into the appropriate struct based on the signature of the Handler. +// +// Handlers are expected to have one of four signatures. +// +// type person struct { +// Name string `json:"name,omitempty"` +// Age uint `json:"age,omitempty"` +// } +// +// handler := func(m *Msg) +// handler := func(p *person) +// handler := func(subject string, o *obj) +// handler := func(subject, reply string, o *obj) +// +// These forms allow a callback to request a raw Msg ptr, where the processing +// of the message from the wire is untouched. 
Process a JSON representation +// and demarshal it into the given struct, e.g. person. +// There are also variants where the callback wants either the subject, or the +// subject and the reply subject. +type Handler any + +// Dissect the cb Handler's signature +func argInfo(cb Handler) (reflect.Type, int) { + cbType := reflect.TypeOf(cb) + if cbType.Kind() != reflect.Func { + panic("nats: Handler needs to be a func") + } + numArgs := cbType.NumIn() + if numArgs == 0 { + return nil, numArgs + } + return cbType.In(numArgs - 1), numArgs +} + +var emptyMsgType = reflect.TypeOf(&Msg{}) + +// Subscribe will create a subscription on the given subject and process incoming +// messages using the specified Handler. The Handler should be a func that matches +// a signature from the description of Handler from above. +func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) { + return c.subscribe(subject, _EMPTY_, cb) +} + +// QueueSubscribe will create a queue subscription on the given subject and process +// incoming messages using the specified Handler. The Handler should be a func that +// matches a signature from the description of Handler from above. +func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) { + return c.subscribe(subject, queue, cb) +} + +// Internal implementation that all public functions will use. +func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) { + if cb == nil { + return nil, errors.New("nats: Handler required for EncodedConn Subscription") + } + argType, numArgs := argInfo(cb) + if argType == nil { + return nil, errors.New("nats: Handler requires at least one argument") + } + + cbValue := reflect.ValueOf(cb) + wantsRaw := (argType == emptyMsgType) + + natsCB := func(m *Msg) { + var oV []reflect.Value + if wantsRaw { + oV = []reflect.Value{reflect.ValueOf(m)} + } else { + var oPtr reflect.Value + if argType.Kind() != reflect.Ptr { + oPtr = reflect.New(argType) + } else { + oPtr = reflect.New(argType.Elem()) + } + if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { + if c.Conn.Opts.AsyncErrorCB != nil { + c.Conn.ach.push(func() { + c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error())) + }) + } + return + } + if argType.Kind() != reflect.Ptr { + oPtr = reflect.Indirect(oPtr) + } + + // Callback Arity + switch numArgs { + case 1: + oV = []reflect.Value{oPtr} + case 2: + subV := reflect.ValueOf(m.Subject) + oV = []reflect.Value{subV, oPtr} + case 3: + subV := reflect.ValueOf(m.Subject) + replyV := reflect.ValueOf(m.Reply) + oV = []reflect.Value{subV, replyV, oPtr} + } + + } + cbValue.Call(oV) + } + + return c.Conn.subscribe(subject, queue, natsCB, nil, false, nil) +} + +// FlushTimeout allows a Flush operation to have an associated timeout. +func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) { + return c.Conn.FlushTimeout(timeout) +} + +// Flush will perform a round trip to the server and return when it +// receives the internal reply. +func (c *EncodedConn) Flush() error { + return c.Conn.Flush() +} + +// Close will close the connection to the server. This call will release +// all blocking calls, such as Flush(), etc. +func (c *EncodedConn) Close() { + c.Conn.Close() +} + +// Drain will put a connection into a drain state. All subscriptions will +// immediately be put into a drain state. 
Upon completion, the publishers
+// will be drained and cannot publish any additional messages. Upon draining
+// of the publishers, the connection will be closed. Use the ClosedCB()
+// option to know when the connection has moved from draining to closed.
+func (c *EncodedConn) Drain() error {
+	return c.Conn.Drain()
+}
+
+// LastError reports the last error encountered via the Connection.
+func (c *EncodedConn) LastError() error {
+	return c.Conn.LastError()
+}
diff --git a/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go b/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go
new file mode 100644
index 00000000..65c2d68b
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go
@@ -0,0 +1,117 @@
+// Copyright 2012-2018 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package builtin
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"unsafe"
+)
+
+// DefaultEncoder implementation for EncodedConn.
+// This encoder will leave []byte and string untouched, but will attempt to
+// turn numbers into appropriate strings that can be decoded. It will also
+// properly encode and decode bools. It will encode a struct, but if you want
+// to properly handle structures you should use JsonEncoder.
+type DefaultEncoder struct {
+	// Empty
+}
+
+var trueB = []byte("true")
+var falseB = []byte("false")
+var nilB = []byte("")
+
+// Encode
+func (je *DefaultEncoder) Encode(subject string, v any) ([]byte, error) {
+	switch arg := v.(type) {
+	case string:
+		bytes := *(*[]byte)(unsafe.Pointer(&arg))
+		return bytes, nil
+	case []byte:
+		return arg, nil
+	case bool:
+		if arg {
+			return trueB, nil
+		} else {
+			return falseB, nil
+		}
+	case nil:
+		return nilB, nil
+	default:
+		var buf bytes.Buffer
+		fmt.Fprintf(&buf, "%+v", arg)
+		return buf.Bytes(), nil
+	}
+}
+
+// Decode
+func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr any) error {
+	// Figure out what it's pointing to...
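+	// (The cast below reinterprets the byte slice as a string without
+	// copying; the data must not be mutated while sData is referenced.)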
+ sData := *(*string)(unsafe.Pointer(&data)) + switch arg := vPtr.(type) { + case *string: + *arg = sData + return nil + case *[]byte: + *arg = data + return nil + case *int: + n, err := strconv.ParseInt(sData, 10, 64) + if err != nil { + return err + } + *arg = int(n) + return nil + case *int32: + n, err := strconv.ParseInt(sData, 10, 64) + if err != nil { + return err + } + *arg = int32(n) + return nil + case *int64: + n, err := strconv.ParseInt(sData, 10, 64) + if err != nil { + return err + } + *arg = int64(n) + return nil + case *float32: + n, err := strconv.ParseFloat(sData, 32) + if err != nil { + return err + } + *arg = float32(n) + return nil + case *float64: + n, err := strconv.ParseFloat(sData, 64) + if err != nil { + return err + } + *arg = float64(n) + return nil + case *bool: + b, err := strconv.ParseBool(sData) + if err != nil { + return err + } + *arg = b + return nil + default: + vt := reflect.TypeOf(arg).Elem() + return fmt.Errorf("nats: Default Encoder can't decode to type %s", vt) + } +} diff --git a/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go b/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go new file mode 100644 index 00000000..4e7cecba --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go @@ -0,0 +1,45 @@ +// Copyright 2013-2018 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package builtin + +import ( + "bytes" + "encoding/gob" +) + +// GobEncoder is a Go specific GOB Encoder implementation for EncodedConn. +// This encoder will use the builtin encoding/gob to Marshal +// and Unmarshal most types, including structs. +type GobEncoder struct { + // Empty +} + +// FIXME(dlc) - This could probably be more efficient. + +// Encode +func (ge *GobEncoder) Encode(subject string, v any) ([]byte, error) { + b := new(bytes.Buffer) + enc := gob.NewEncoder(b) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Decode +func (ge *GobEncoder) Decode(subject string, data []byte, vPtr any) (err error) { + dec := gob.NewDecoder(bytes.NewBuffer(data)) + err = dec.Decode(vPtr) + return +} diff --git a/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go b/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go new file mode 100644 index 00000000..9b6ffc01 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go @@ -0,0 +1,56 @@ +// Copyright 2012-2018 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package builtin + +import ( + "encoding/json" + "strings" +) + +// JsonEncoder is a JSON Encoder implementation for EncodedConn. +// This encoder will use the builtin encoding/json to Marshal +// and Unmarshal most types, including structs. +type JsonEncoder struct { + // Empty +} + +// Encode +func (je *JsonEncoder) Encode(subject string, v any) ([]byte, error) { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + return b, nil +} + +// Decode +func (je *JsonEncoder) Decode(subject string, data []byte, vPtr any) (err error) { + switch arg := vPtr.(type) { + case *string: + // If they want a string and it is a JSON string, strip quotes + // This allows someone to send a struct but receive as a plain string + // This cast should be efficient for Go 1.3 and beyond. + str := string(data) + if strings.HasPrefix(str, `"`) && strings.HasSuffix(str, `"`) { + *arg = str[1 : len(str)-1] + } else { + *arg = str + } + case *[]byte: + *arg = data + default: + err = json.Unmarshal(data, arg) + } + return +} diff --git a/vendor/github.com/nats-io/nats.go/go_test.mod b/vendor/github.com/nats-io/nats.go/go_test.mod new file mode 100644 index 00000000..8902c1ed --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/go_test.mod @@ -0,0 +1,22 @@ +module github.com/nats-io/nats.go + +go 1.19 + +require ( + github.com/golang/protobuf v1.4.2 + github.com/klauspost/compress v1.17.0 + github.com/nats-io/nats-server/v2 v2.10.0 + github.com/nats-io/nkeys v0.4.5 + github.com/nats-io/nuid v1.0.1 + go.uber.org/goleak v1.2.1 + golang.org/x/text v0.13.0 + google.golang.org/protobuf v1.23.0 +) + +require ( + github.com/minio/highwayhash v1.0.2 // indirect + github.com/nats-io/jwt/v2 v2.5.2 // indirect + golang.org/x/crypto v0.13.0 // indirect + golang.org/x/sys v0.12.0 // indirect + golang.org/x/time v0.3.0 // indirect +) diff --git a/vendor/github.com/nats-io/nats.go/go_test.sum b/vendor/github.com/nats-io/nats.go/go_test.sum new file mode 100644 index 00000000..ce4ba920 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/go_test.sum @@ -0,0 +1,48 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod 
h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/nats-io/jwt/v2 v2.5.2 h1:DhGH+nKt+wIkDxM6qnVSKjokq5t59AZV5HRcFW0zJwU= +github.com/nats-io/jwt/v2 v2.5.2/go.mod h1:24BeQtRwxRV8ruvC4CojXlx/WQ/VjuwlYiH+vu/+ibI= +github.com/nats-io/nats-server/v2 v2.10.0 h1:rcU++Hzo+wARxtJugrV3J5z5iGdHeVG8tT8Chb3bKDg= +github.com/nats-io/nats-server/v2 v2.10.0/go.mod h1:3PMvMSu2cuK0J9YInRLWdFpFsswKKGUS77zVSAudRto= +github.com/nats-io/nkeys v0.4.5 h1:Zdz2BUlFm4fJlierwvGK+yl20IAKUm7eV6AAZXEhkPk= +github.com/nats-io/nkeys v0.4.5/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/vendor/github.com/nats-io/nats.go/internal/parser/parse.go b/vendor/github.com/nats-io/nats.go/internal/parser/parse.go new file mode 100644 index 00000000..7eab8add --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/internal/parser/parse.go @@ -0,0 +1,104 @@ +// Copyright 2020-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	AckDomainTokenPos = iota + 2
+	AckAccHashTokenPos
+	AckStreamTokenPos
+	AckConsumerTokenPos
+	AckNumDeliveredTokenPos
+	AckStreamSeqTokenPos
+	AckConsumerSeqTokenPos
+	AckTimestampSeqTokenPos
+	AckNumPendingTokenPos
+)
+
+var ErrInvalidSubjectFormat = errors.New("invalid format of ACK subject")
+
+// Quick parser for positive numbers in ack reply encoding.
+// NOTE: This parser does not detect uint64 overflow
+func ParseNum(d string) (n uint64) {
+	if len(d) == 0 {
+		return 0
+	}
+
+	// ASCII numbers 0-9
+	const (
+		asciiZero = 48
+		asciiNine = 57
+	)
+
+	for _, dec := range d {
+		if dec < asciiZero || dec > asciiNine {
+			return 0
+		}
+		n = n*10 + uint64(dec) - asciiZero
+	}
+	return
+}
+
+func GetMetadataFields(subject string) ([]string, error) {
+	v1TokenCounts, v2TokenCounts := 9, 12
+
+	var start int
+	tokens := make([]string, 0, v2TokenCounts)
+	for i := 0; i < len(subject); i++ {
+		if subject[i] == '.' {
+			tokens = append(tokens, subject[start:i])
+			start = i + 1
+		}
+	}
+	tokens = append(tokens, subject[start:])
+	//
+	// Newer server will include the domain name and account hash in the subject,
+	// and a token at the end.
+	//
+	// Old subject was:
+	// $JS.ACK.<stream>.<consumer>.<delivered>.<sseq>.<cseq>.<tm>.<pending>
+	//
+	// New subject would be:
+	// $JS.ACK.<domain>.<account hash>.<stream>.<consumer>.<delivered>.<sseq>.<cseq>.<tm>.<pending>.<a token with a random value>
+	//
+	// v1 has 9 tokens, v2 has 12, but we must not be strict on the 12th since
+	// it may be removed in the future. Also, the library has no use for it.
+	// The point is that a v2 ACK subject is valid if it has at least 11 tokens.
+	//
+	tokensLen := len(tokens)
+	// If lower than 9 or more than 9 but less than 11, report an error
+	if tokensLen < v1TokenCounts || (tokensLen > v1TokenCounts && tokensLen < v2TokenCounts-1) {
+		return nil, ErrInvalidSubjectFormat
+	}
+	if tokens[0] != "$JS" || tokens[1] != "ACK" {
+		return nil, fmt.Errorf("%w: subject should start with $JS.ACK", ErrInvalidSubjectFormat)
+	}
+	// For v1 style, we insert 2 empty tokens (domain and hash) so that the
+	// rest of the library references known fields at a constant location.
+	if tokensLen == v1TokenCounts {
+		// Extend the array (we know the backend is big enough)
+		tokens = append(tokens[:AckDomainTokenPos+2], tokens[AckDomainTokenPos:]...)
+		// Clear the domain and hash tokens
+		tokens[AckDomainTokenPos], tokens[AckAccHashTokenPos] = "", ""
+
+	} else if tokens[AckDomainTokenPos] == "_" {
+		// If domain is "_", replace with empty value.
+		tokens[AckDomainTokenPos] = ""
+	}
+	return tokens, nil
+}
diff --git a/vendor/github.com/nats-io/nats.go/js.go b/vendor/github.com/nats-io/nats.go/js.go
new file mode 100644
index 00000000..444278e0
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/js.go
@@ -0,0 +1,3815 @@
+// Copyright 2020-2023 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "math/rand" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/nats-io/nats.go/internal/parser" + "github.com/nats-io/nuid" +) + +// JetStream allows persistent messaging through JetStream. +type JetStream interface { + // Publish publishes a message to JetStream. + Publish(subj string, data []byte, opts ...PubOpt) (*PubAck, error) + + // PublishMsg publishes a Msg to JetStream. + PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) + + // PublishAsync publishes a message to JetStream and returns a PubAckFuture. + // The data should not be changed until the PubAckFuture has been processed. + PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error) + + // PublishMsgAsync publishes a Msg to JetStream and returns a PubAckFuture. + // The message should not be changed until the PubAckFuture has been processed. + PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) + + // PublishAsyncPending returns the number of async publishes outstanding for this context. + PublishAsyncPending() int + + // PublishAsyncComplete returns a channel that will be closed when all outstanding messages are ack'd. + PublishAsyncComplete() <-chan struct{} + + // Subscribe creates an async Subscription for JetStream. + // The stream and consumer names can be provided with the nats.Bind() option. + // For creating an ephemeral (where the consumer name is picked by the server), + // you can provide the stream name with nats.BindStream(). + // If no stream name is specified, the library will attempt to figure out which + // stream the subscription is for. See important notes below for more details. + // + // IMPORTANT NOTES: + // * If none of the options Bind() nor Durable() are specified, the library will + // send a request to the server to create an ephemeral JetStream consumer, + // which will be deleted after an Unsubscribe() or Drain(), or automatically + // by the server after a short period of time after the NATS subscription is + // gone. + // * If Durable() option is specified, the library will attempt to lookup a JetStream + // consumer with this name, and if found, will bind to it and not attempt to + // delete it. However, if not found, the library will send a request to + // create such durable JetStream consumer. Note that the library will delete + // the JetStream consumer after an Unsubscribe() or Drain() only if it + // created the durable consumer while subscribing. If the durable consumer + // already existed prior to subscribing it won't be deleted. + // * If Bind() option is provided, the library will attempt to lookup the + // consumer with the given name, and if successful, bind to it. If the lookup fails, + // then the Subscribe() call will return an error. + Subscribe(subj string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) + + // SubscribeSync creates a Subscription that can be used to process messages synchronously. 
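+	// Messages are then fetched from the returned Subscription using NextMsg().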
+ // See important note in Subscribe() + SubscribeSync(subj string, opts ...SubOpt) (*Subscription, error) + + // ChanSubscribe creates channel based Subscription. + // See important note in Subscribe() + ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) + + // ChanQueueSubscribe creates channel based Subscription with a queue group. + // See important note in QueueSubscribe() + ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) + + // QueueSubscribe creates a Subscription with a queue group. + // If no optional durable name nor binding options are specified, the queue name will be used as a durable name. + // See important note in Subscribe() + QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) + + // QueueSubscribeSync creates a Subscription with a queue group that can be used to process messages synchronously. + // See important note in QueueSubscribe() + QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscription, error) + + // PullSubscribe creates a Subscription that can fetch messages. + // See important note in Subscribe(). Additionally, for an ephemeral pull consumer, the "durable" value must be + // set to an empty string. + PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error) +} + +// JetStreamContext allows JetStream messaging and stream management. +type JetStreamContext interface { + JetStream + JetStreamManager + KeyValueManager + ObjectStoreManager +} + +// Request API subjects for JetStream. +const ( + // defaultAPIPrefix is the default prefix for the JetStream API. + defaultAPIPrefix = "$JS.API." + + // jsDomainT is used to create JetStream API prefix by specifying only Domain + jsDomainT = "$JS.%s.API." + + // jsExtDomainT is used to create a StreamSource External APIPrefix + jsExtDomainT = "$JS.%s.API" + + // apiAccountInfo is for obtaining general information about JetStream. + apiAccountInfo = "INFO" + + // apiConsumerCreateT is used to create consumers. + // it accepts stream name and consumer name. + apiConsumerCreateT = "CONSUMER.CREATE.%s.%s" + + // apiConsumerCreateT is used to create consumers. + // it accepts stream name, consumer name and filter subject + apiConsumerCreateWithFilterSubjectT = "CONSUMER.CREATE.%s.%s.%s" + + // apiLegacyConsumerCreateT is used to create consumers. + // this is a legacy endpoint to support creating ephemerals before nats-server v2.9.0. + apiLegacyConsumerCreateT = "CONSUMER.CREATE.%s" + + // apiDurableCreateT is used to create durable consumers. + // this is a legacy endpoint to support creating durable consumers before nats-server v2.9.0. + apiDurableCreateT = "CONSUMER.DURABLE.CREATE.%s.%s" + + // apiConsumerInfoT is used to create consumers. + apiConsumerInfoT = "CONSUMER.INFO.%s.%s" + + // apiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode. + apiRequestNextT = "CONSUMER.MSG.NEXT.%s.%s" + + // apiConsumerDeleteT is used to delete consumers. + apiConsumerDeleteT = "CONSUMER.DELETE.%s.%s" + + // apiConsumerListT is used to return all detailed consumer information + apiConsumerListT = "CONSUMER.LIST.%s" + + // apiConsumerNamesT is used to return a list with all consumer names for the stream. + apiConsumerNamesT = "CONSUMER.NAMES.%s" + + // apiStreams can lookup a stream by subject. + apiStreams = "STREAM.NAMES" + + // apiStreamCreateT is the endpoint to create new streams. 
+ apiStreamCreateT = "STREAM.CREATE.%s" + + // apiStreamInfoT is the endpoint to get information on a stream. + apiStreamInfoT = "STREAM.INFO.%s" + + // apiStreamUpdateT is the endpoint to update existing streams. + apiStreamUpdateT = "STREAM.UPDATE.%s" + + // apiStreamDeleteT is the endpoint to delete streams. + apiStreamDeleteT = "STREAM.DELETE.%s" + + // apiStreamPurgeT is the endpoint to purge streams. + apiStreamPurgeT = "STREAM.PURGE.%s" + + // apiStreamListT is the endpoint that will return all detailed stream information + apiStreamListT = "STREAM.LIST" + + // apiMsgGetT is the endpoint to get a message. + apiMsgGetT = "STREAM.MSG.GET.%s" + + // apiMsgGetT is the endpoint to perform a direct get of a message. + apiDirectMsgGetT = "DIRECT.GET.%s" + + // apiDirectMsgGetLastBySubjectT is the endpoint to perform a direct get of a message by subject. + apiDirectMsgGetLastBySubjectT = "DIRECT.GET.%s.%s" + + // apiMsgDeleteT is the endpoint to remove a message. + apiMsgDeleteT = "STREAM.MSG.DELETE.%s" + + // orderedHeartbeatsInterval is how fast we want HBs from the server during idle. + orderedHeartbeatsInterval = 5 * time.Second + + // Scale for threshold of missed HBs or lack of activity. + hbcThresh = 2 + + // For ChanSubscription, we can't update sub.delivered as we do for other + // type of subscriptions, since the channel is user provided. + // With flow control in play, we will check for flow control on incoming + // messages (as opposed to when they are delivered), but also from a go + // routine. Without this, the subscription would possibly stall until + // a new message or heartbeat/fc are received. + chanSubFCCheckInterval = 250 * time.Millisecond + + // Default time wait between retries on Publish iff err is NoResponders. + DefaultPubRetryWait = 250 * time.Millisecond + + // Default number of retries + DefaultPubRetryAttempts = 2 + + // defaultAsyncPubAckInflight is the number of async pub acks inflight. + defaultAsyncPubAckInflight = 4000 +) + +// Types of control messages, so far heartbeat and flow control +const ( + jsCtrlHB = 1 + jsCtrlFC = 2 +) + +// js is an internal struct from a JetStreamContext. +type js struct { + nc *Conn + opts *jsOpts + + // For async publish context. + mu sync.RWMutex + rpre string + rsub *Subscription + pafs map[string]*pubAckFuture + stc chan struct{} + dch chan struct{} + rr *rand.Rand + connStatusCh chan (Status) +} + +type jsOpts struct { + ctx context.Context + // For importing JetStream from other accounts. + pre string + // Amount of time to wait for API requests. + wait time.Duration + // For async publish error handling. + aecb MsgErrHandler + // Max async pub ack in flight + maxpa int + // the domain that produced the pre + domain string + // enables protocol tracing + ctrace ClientTrace + shouldTrace bool + // purgeOpts contains optional stream purge options + purgeOpts *StreamPurgeRequest + // streamInfoOpts contains optional stream info options + streamInfoOpts *StreamInfoRequest + // streamListSubject is used for subject filtering when listing streams / stream names + streamListSubject string + // For direct get message requests + directGet bool + // For direct get next message + directNextFor string + + // featureFlags are used to enable/disable specific JetStream features + featureFlags featureFlags +} + +const ( + defaultRequestWait = 5 * time.Second + defaultAccountCheck = 20 * time.Second +) + +// JetStream returns a JetStreamContext for messaging and stream management. 
+// Errors are only returned if inconsistent options are provided.
+func (nc *Conn) JetStream(opts ...JSOpt) (JetStreamContext, error) {
+ js := &js{
+ nc: nc,
+ opts: &jsOpts{
+ pre: defaultAPIPrefix,
+ wait: defaultRequestWait,
+ maxpa: defaultAsyncPubAckInflight,
+ },
+ }
+
+ for _, opt := range opts {
+ if err := opt.configureJSContext(js.opts); err != nil {
+ return nil, err
+ }
+ }
+ return js, nil
+}
+
+// JSOpt configures a JetStreamContext.
+type JSOpt interface {
+ configureJSContext(opts *jsOpts) error
+}
+
+// jsOptFn configures an option for the JetStreamContext.
+type jsOptFn func(opts *jsOpts) error
+
+func (opt jsOptFn) configureJSContext(opts *jsOpts) error {
+ return opt(opts)
+}
+
+type featureFlags struct {
+ useDurableConsumerCreate bool
+}
+
+// UseLegacyDurableConsumers makes JetStream use the legacy (pre nats-server v2.9.0) subjects for consumer creation.
+// If this option is used when creating a JetStreamContext, $JS.API.CONSUMER.DURABLE.CREATE.<stream>.<consumer> will be used
+// to create a consumer with Durable provided, rather than $JS.API.CONSUMER.CREATE.<stream>.<consumer>.
+func UseLegacyDurableConsumers() JSOpt {
+ return jsOptFn(func(opts *jsOpts) error {
+ opts.featureFlags.useDurableConsumerCreate = true
+ return nil
+ })
+}
+
+// ClientTrace can be used to trace API interactions for the JetStream Context.
+type ClientTrace struct {
+ RequestSent func(subj string, payload []byte)
+ ResponseReceived func(subj string, payload []byte, hdr Header)
+}
+
+func (ct ClientTrace) configureJSContext(js *jsOpts) error {
+ js.ctrace = ct
+ js.shouldTrace = true
+ return nil
+}
+
+// Domain changes the domain part of the JetStream API prefix.
+func Domain(domain string) JSOpt {
+ if domain == _EMPTY_ {
+ return APIPrefix(_EMPTY_)
+ }
+
+ return jsOptFn(func(js *jsOpts) error {
+ js.domain = domain
+ js.pre = fmt.Sprintf(jsDomainT, domain)
+
+ return nil
+ })
+}
+
+func (s *StreamPurgeRequest) configureJSContext(js *jsOpts) error {
+ js.purgeOpts = s
+ return nil
+}
+
+func (s *StreamInfoRequest) configureJSContext(js *jsOpts) error {
+ js.streamInfoOpts = s
+ return nil
+}
+
+// APIPrefix changes the default prefix used for the JetStream API.
+func APIPrefix(pre string) JSOpt {
+ return jsOptFn(func(js *jsOpts) error {
+ if pre == _EMPTY_ {
+ return nil
+ }
+
+ js.pre = pre
+ if !strings.HasSuffix(js.pre, ".") {
+ js.pre = js.pre + "."
+ }
+
+ return nil
+ })
+}
+
+// DirectGet is an option that can be used to make GetMsg() or GetLastMsg()
+// retrieve the message directly from a group of servers (leader and replicas)
+// if the stream was created with the AllowDirect option.
+func DirectGet() JSOpt {
+ return jsOptFn(func(js *jsOpts) error {
+ js.directGet = true
+ return nil
+ })
+}
+
+// DirectGetNext is an option that can be used to make GetMsg() retrieve the message
+// directly from a group of servers (leader and replicas) if the stream was
+// created with the AllowDirect option.
+// The server will find the next message matching the filter `subject` starting
+// at the start sequence (argument in GetMsg()). The filter `subject` can be a
+// wildcard.
+func DirectGetNext(subject string) JSOpt {
+ return jsOptFn(func(js *jsOpts) error {
+ js.directGet = true
+ js.directNextFor = subject
+ return nil
+ })
+}
+
+// StreamListFilter is an option that can be used to configure `StreamsInfo()` and `StreamNames()` requests.
+// It allows filtering the returned streams by subject associated with each stream.
+// Wildcards can be used.
For example, `StreamListFilter(FOO.*.A)` will return
+// all streams which have at least one subject matching the provided pattern (e.g. FOO.TEST.A).
+func StreamListFilter(subject string) JSOpt {
+ return jsOptFn(func(opts *jsOpts) error {
+ opts.streamListSubject = subject
+ return nil
+ })
+}
+
+func (js *js) apiSubj(subj string) string {
+ if js.opts.pre == _EMPTY_ {
+ return subj
+ }
+ var b strings.Builder
+ b.WriteString(js.opts.pre)
+ b.WriteString(subj)
+ return b.String()
+}
+
+// PubOpt configures options for publishing JetStream messages.
+type PubOpt interface {
+ configurePublish(opts *pubOpts) error
+}
+
+// pubOptFn is a function option used to configure JetStream Publish.
+type pubOptFn func(opts *pubOpts) error
+
+func (opt pubOptFn) configurePublish(opts *pubOpts) error {
+ return opt(opts)
+}
+
+type pubOpts struct {
+ ctx context.Context
+ ttl time.Duration
+ id string
+ lid string // Expected last msgId
+ str string // Expected stream name
+ seq *uint64 // Expected last sequence
+ lss *uint64 // Expected last sequence per subject
+
+ // Publish retries for NoResponders err.
+ rwait time.Duration // Retry wait between attempts
+ rnum int // Retry attempts
+
+ // stallWait is the max wait of an async pub ack.
+ stallWait time.Duration
+}
+
+// pubAckResponse is the ack response from the JetStream API when publishing a message.
+type pubAckResponse struct {
+ apiResponse
+ *PubAck
+}
+
+// PubAck is an ack received after successfully publishing a message.
+type PubAck struct {
+ Stream string `json:"stream"`
+ Sequence uint64 `json:"seq"`
+ Duplicate bool `json:"duplicate,omitempty"`
+ Domain string `json:"domain,omitempty"`
+}
+
+// Headers for published messages.
+const (
+ MsgIdHdr = "Nats-Msg-Id"
+ ExpectedStreamHdr = "Nats-Expected-Stream"
+ ExpectedLastSeqHdr = "Nats-Expected-Last-Sequence"
+ ExpectedLastSubjSeqHdr = "Nats-Expected-Last-Subject-Sequence"
+ ExpectedLastMsgIdHdr = "Nats-Expected-Last-Msg-Id"
+ MsgRollup = "Nats-Rollup"
+)
+
+// Headers for republished messages and direct gets.
+const (
+ JSStream = "Nats-Stream"
+ JSSequence = "Nats-Sequence"
+ JSTimeStamp = "Nats-Time-Stamp"
+ JSSubject = "Nats-Subject"
+ JSLastSequence = "Nats-Last-Sequence"
+)
+
+// MsgSize is a header that will be part of a consumer's delivered message if HeadersOnly requested.
+const MsgSize = "Nats-Msg-Size"
+
+// Rollups, can be subject only or all messages.
+const (
+ MsgRollupSubject = "sub"
+ MsgRollupAll = "all"
+)
+
+// PublishMsg publishes a Msg to a stream from JetStream.
+func (js *js) PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) {
+ var o = pubOpts{rwait: DefaultPubRetryWait, rnum: DefaultPubRetryAttempts}
+ if len(opts) > 0 {
+ if m.Header == nil {
+ m.Header = Header{}
+ }
+ for _, opt := range opts {
+ if err := opt.configurePublish(&o); err != nil {
+ return nil, err
+ }
+ }
+ }
+ // Check for option collisions. Right now just timeout and context.
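+ // A context and an explicit timeout are mutually exclusive here; if neither
+ // is given, the JetStreamContext's default wait (see MaxWait) is applied below.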
+ if o.ctx != nil && o.ttl != 0 { + return nil, ErrContextAndTimeout + } + if o.ttl == 0 && o.ctx == nil { + o.ttl = js.opts.wait + } + if o.stallWait > 0 { + return nil, fmt.Errorf("nats: stall wait cannot be set to sync publish") + } + + if o.id != _EMPTY_ { + m.Header.Set(MsgIdHdr, o.id) + } + if o.lid != _EMPTY_ { + m.Header.Set(ExpectedLastMsgIdHdr, o.lid) + } + if o.str != _EMPTY_ { + m.Header.Set(ExpectedStreamHdr, o.str) + } + if o.seq != nil { + m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10)) + } + if o.lss != nil { + m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10)) + } + + var resp *Msg + var err error + + if o.ttl > 0 { + resp, err = js.nc.RequestMsg(m, time.Duration(o.ttl)) + } else { + resp, err = js.nc.RequestMsgWithContext(o.ctx, m) + } + + if err != nil { + for r, ttl := 0, o.ttl; err == ErrNoResponders && (r < o.rnum || o.rnum < 0); r++ { + // To protect against small blips in leadership changes etc, if we get a no responders here retry. + if o.ctx != nil { + select { + case <-o.ctx.Done(): + case <-time.After(o.rwait): + } + } else { + time.Sleep(o.rwait) + } + if o.ttl > 0 { + ttl -= o.rwait + if ttl <= 0 { + err = ErrTimeout + break + } + resp, err = js.nc.RequestMsg(m, time.Duration(ttl)) + } else { + resp, err = js.nc.RequestMsgWithContext(o.ctx, m) + } + } + if err != nil { + if err == ErrNoResponders { + err = ErrNoStreamResponse + } + return nil, err + } + } + + var pa pubAckResponse + if err := json.Unmarshal(resp.Data, &pa); err != nil { + return nil, ErrInvalidJSAck + } + if pa.Error != nil { + return nil, pa.Error + } + if pa.PubAck == nil || pa.PubAck.Stream == _EMPTY_ { + return nil, ErrInvalidJSAck + } + return pa.PubAck, nil +} + +// Publish publishes a message to a stream from JetStream. +func (js *js) Publish(subj string, data []byte, opts ...PubOpt) (*PubAck, error) { + return js.PublishMsg(&Msg{Subject: subj, Data: data}, opts...) +} + +// PubAckFuture is a future for a PubAck. +type PubAckFuture interface { + // Ok returns a receive only channel that can be used to get a PubAck. + Ok() <-chan *PubAck + + // Err returns a receive only channel that can be used to get the error from an async publish. + Err() <-chan error + + // Msg returns the message that was sent to the server. + Msg() *Msg +} + +type pubAckFuture struct { + js *js + msg *Msg + pa *PubAck + st time.Time + err error + errCh chan error + doneCh chan *PubAck +} + +func (paf *pubAckFuture) Ok() <-chan *PubAck { + paf.js.mu.Lock() + defer paf.js.mu.Unlock() + + if paf.doneCh == nil { + paf.doneCh = make(chan *PubAck, 1) + if paf.pa != nil { + paf.doneCh <- paf.pa + } + } + + return paf.doneCh +} + +func (paf *pubAckFuture) Err() <-chan error { + paf.js.mu.Lock() + defer paf.js.mu.Unlock() + + if paf.errCh == nil { + paf.errCh = make(chan error, 1) + if paf.err != nil { + paf.errCh <- paf.err + } + } + + return paf.errCh +} + +func (paf *pubAckFuture) Msg() *Msg { + paf.js.mu.RLock() + defer paf.js.mu.RUnlock() + return paf.msg +} + +// For quick token lookup etc. +const aReplyPreLen = 14 +const aReplyTokensize = 6 + +func (js *js) newAsyncReply() string { + js.mu.Lock() + if js.rsub == nil { + // Create our wildcard reply subject. + sha := sha256.New() + sha.Write([]byte(nuid.Next())) + b := sha.Sum(nil) + for i := 0; i < aReplyTokensize; i++ { + b[i] = rdigits[int(b[i]%base)] + } + inboxPrefix := InboxPrefix + if js.nc.Opts.InboxPrefix != _EMPTY_ { + inboxPrefix = js.nc.Opts.InboxPrefix + "." 
+ }
+ js.rpre = fmt.Sprintf("%s%s.", inboxPrefix, b[:aReplyTokensize])
+ sub, err := js.nc.Subscribe(fmt.Sprintf("%s*", js.rpre), js.handleAsyncReply)
+ if err != nil {
+ js.mu.Unlock()
+ return _EMPTY_
+ }
+ js.rsub = sub
+ js.rr = rand.New(rand.NewSource(time.Now().UnixNano()))
+ }
+ if js.connStatusCh == nil {
+ js.connStatusCh = js.nc.StatusChanged(RECONNECTING, CLOSED)
+ go js.resetPendingAcksOnReconnect()
+ }
+ var sb strings.Builder
+ sb.WriteString(js.rpre)
+ rn := js.rr.Int63()
+ var b [aReplyTokensize]byte
+ for i, l := 0, rn; i < len(b); i++ {
+ b[i] = rdigits[l%base]
+ l /= base
+ }
+ sb.Write(b[:])
+ js.mu.Unlock()
+ return sb.String()
+}
+
+func (js *js) resetPendingAcksOnReconnect() {
+ js.mu.Lock()
+ connStatusCh := js.connStatusCh
+ js.mu.Unlock()
+ for {
+ newStatus, ok := <-connStatusCh
+ if !ok || newStatus == CLOSED {
+ return
+ }
+ js.mu.Lock()
+ for _, paf := range js.pafs {
+ paf.err = ErrDisconnected
+ }
+ js.pafs = nil
+ if js.dch != nil {
+ close(js.dch)
+ js.dch = nil
+ }
+ js.mu.Unlock()
+ }
+}
+
+func (js *js) cleanupReplySub() {
+ js.mu.Lock()
+ if js.rsub != nil {
+ js.rsub.Unsubscribe()
+ js.rsub = nil
+ }
+ if js.connStatusCh != nil {
+ close(js.connStatusCh)
+ js.connStatusCh = nil
+ }
+ js.mu.Unlock()
+}
+
+// registerPAF will register for a PubAckFuture.
+func (js *js) registerPAF(id string, paf *pubAckFuture) (int, int) {
+ js.mu.Lock()
+ if js.pafs == nil {
+ js.pafs = make(map[string]*pubAckFuture)
+ }
+ paf.js = js
+ js.pafs[id] = paf
+ np := len(js.pafs)
+ maxpa := js.opts.maxpa
+ js.mu.Unlock()
+ return np, maxpa
+}
+
+// Lock should be held.
+func (js *js) getPAF(id string) *pubAckFuture {
+ if js.pafs == nil {
+ return nil
+ }
+ return js.pafs[id]
+}
+
+// clearPAF will remove a PubAckFuture that was registered.
+func (js *js) clearPAF(id string) {
+ js.mu.Lock()
+ delete(js.pafs, id)
+ js.mu.Unlock()
+}
+
+// PublishAsyncPending returns how many PubAckFutures are pending.
+func (js *js) PublishAsyncPending() int {
+ js.mu.RLock()
+ defer js.mu.RUnlock()
+ return len(js.pafs)
+}
+
+func (js *js) asyncStall() <-chan struct{} {
+ js.mu.Lock()
+ if js.stc == nil {
+ js.stc = make(chan struct{})
+ }
+ stc := js.stc
+ js.mu.Unlock()
+ return stc
+}
+
+// Handle an async reply from PublishAsync.
+func (js *js) handleAsyncReply(m *Msg) {
+ if len(m.Subject) <= aReplyPreLen {
+ return
+ }
+ id := m.Subject[aReplyPreLen:]
+
+ js.mu.Lock()
+ paf := js.getPAF(id)
+ if paf == nil {
+ js.mu.Unlock()
+ return
+ }
+ // Remove
+ delete(js.pafs, id)
+
+ // Check on anyone stalled and waiting.
+ if js.stc != nil && len(js.pafs) < js.opts.maxpa {
+ close(js.stc)
+ js.stc = nil
+ }
+ // Check on anyone waiting on done status.
+ if js.dch != nil && len(js.pafs) == 0 {
+ dch := js.dch
+ js.dch = nil
+ // Defer here so error is processed and can be checked.
+ defer close(dch)
+ }
+
+ doErr := func(err error) {
+ paf.err = err
+ if paf.errCh != nil {
+ paf.errCh <- paf.err
+ }
+ cb := js.opts.aecb
+ js.mu.Unlock()
+ if cb != nil {
+ cb(paf.js, paf.msg, err)
+ }
+ }
+
+ // Process no responders etc.
+ if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
+ doErr(ErrNoResponders)
+ return
+ }
+
+ var pa pubAckResponse
+ if err := json.Unmarshal(m.Data, &pa); err != nil {
+ doErr(ErrInvalidJSAck)
+ return
+ }
+ if pa.Error != nil {
+ doErr(pa.Error)
+ return
+ }
+ if pa.PubAck == nil || pa.PubAck.Stream == _EMPTY_ {
+ doErr(ErrInvalidJSAck)
+ return
+ }
+
+ // So here we have received a proper puback.
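+ // Positive ack: record it on the future and, if the caller already asked
+ // for the Ok() channel, deliver the PubAck there as well.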
+ paf.pa = pa.PubAck + if paf.doneCh != nil { + paf.doneCh <- paf.pa + } + js.mu.Unlock() +} + +// MsgErrHandler is used to process asynchronous errors from +// JetStream PublishAsync. It will return the original +// message sent to the server for possible retransmitting and the error encountered. +type MsgErrHandler func(JetStream, *Msg, error) + +// PublishAsyncErrHandler sets the error handler for async publishes in JetStream. +func PublishAsyncErrHandler(cb MsgErrHandler) JSOpt { + return jsOptFn(func(js *jsOpts) error { + js.aecb = cb + return nil + }) +} + +// PublishAsyncMaxPending sets the maximum outstanding async publishes that can be inflight at one time. +func PublishAsyncMaxPending(max int) JSOpt { + return jsOptFn(func(js *jsOpts) error { + if max < 1 { + return errors.New("nats: max ack pending should be >= 1") + } + js.maxpa = max + return nil + }) +} + +// PublishAsync publishes a message to JetStream and returns a PubAckFuture +func (js *js) PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error) { + return js.PublishMsgAsync(&Msg{Subject: subj, Data: data}, opts...) +} + +const defaultStallWait = 200 * time.Millisecond + +func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) { + var o pubOpts + if len(opts) > 0 { + if m.Header == nil { + m.Header = Header{} + } + for _, opt := range opts { + if err := opt.configurePublish(&o); err != nil { + return nil, err + } + } + } + + // Timeouts and contexts do not make sense for these. + if o.ttl != 0 || o.ctx != nil { + return nil, ErrContextAndTimeout + } + stallWait := defaultStallWait + if o.stallWait > 0 { + stallWait = o.stallWait + } + + // FIXME(dlc) - Make common. + if o.id != _EMPTY_ { + m.Header.Set(MsgIdHdr, o.id) + } + if o.lid != _EMPTY_ { + m.Header.Set(ExpectedLastMsgIdHdr, o.lid) + } + if o.str != _EMPTY_ { + m.Header.Set(ExpectedStreamHdr, o.str) + } + if o.seq != nil { + m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10)) + } + if o.lss != nil { + m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10)) + } + + // Reply + if m.Reply != _EMPTY_ { + return nil, errors.New("nats: reply subject should be empty") + } + reply := m.Reply + m.Reply = js.newAsyncReply() + defer func() { m.Reply = reply }() + + if m.Reply == _EMPTY_ { + return nil, errors.New("nats: error creating async reply handler") + } + + id := m.Reply[aReplyPreLen:] + paf := &pubAckFuture{msg: m, st: time.Now()} + numPending, maxPending := js.registerPAF(id, paf) + + if maxPending > 0 && numPending >= maxPending { + select { + case <-js.asyncStall(): + case <-time.After(stallWait): + js.clearPAF(id) + return nil, errors.New("nats: stalled with too many outstanding async published messages") + } + } + if err := js.nc.PublishMsg(m); err != nil { + js.clearPAF(id) + return nil, err + } + + return paf, nil +} + +// PublishAsyncComplete returns a channel that will be closed when all outstanding messages have been ack'd. +func (js *js) PublishAsyncComplete() <-chan struct{} { + js.mu.Lock() + defer js.mu.Unlock() + if js.dch == nil { + js.dch = make(chan struct{}) + } + dch := js.dch + if len(js.pafs) == 0 { + close(js.dch) + js.dch = nil + } + return dch +} + +// MsgId sets the message ID used for deduplication. +func MsgId(id string) PubOpt { + return pubOptFn(func(opts *pubOpts) error { + opts.id = id + return nil + }) +} + +// ExpectStream sets the expected stream to respond from the publish. 
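+// A minimal usage sketch (subject and stream names here are illustrative):
+//
+//	_, err := js.Publish("ORDERS.created", data, nats.ExpectStream("ORDERS"))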
+func ExpectStream(stream string) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.str = stream
+ return nil
+ })
+}
+
+// ExpectLastSequence sets the expected sequence in the response from the publish.
+func ExpectLastSequence(seq uint64) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.seq = &seq
+ return nil
+ })
+}
+
+// ExpectLastSequencePerSubject sets the expected sequence per subject in the response from the publish.
+func ExpectLastSequencePerSubject(seq uint64) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.lss = &seq
+ return nil
+ })
+}
+
+// ExpectLastMsgId sets the expected last msgId in the response from the publish.
+func ExpectLastMsgId(id string) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.lid = id
+ return nil
+ })
+}
+
+// RetryWait sets the retry wait time when ErrNoResponders is encountered.
+func RetryWait(dur time.Duration) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.rwait = dur
+ return nil
+ })
+}
+
+// RetryAttempts sets the number of retry attempts when ErrNoResponders is encountered.
+func RetryAttempts(num int) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.rnum = num
+ return nil
+ })
+}
+
+// StallWait sets the max wait when the producer becomes stalled while producing messages.
+func StallWait(ttl time.Duration) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ if ttl <= 0 {
+ return fmt.Errorf("nats: stall wait should be more than 0")
+ }
+ opts.stallWait = ttl
+ return nil
+ })
+}
+
+type ackOpts struct {
+ ttl time.Duration
+ ctx context.Context
+ nakDelay time.Duration
+}
+
+// AckOpt are the options that can be passed when acknowledging a message.
+type AckOpt interface {
+ configureAck(opts *ackOpts) error
+}
+
+// MaxWait sets the maximum amount of time we will wait for a response.
+type MaxWait time.Duration
+
+func (ttl MaxWait) configureJSContext(js *jsOpts) error {
+ js.wait = time.Duration(ttl)
+ return nil
+}
+
+func (ttl MaxWait) configurePull(opts *pullOpts) error {
+ opts.ttl = time.Duration(ttl)
+ return nil
+}
+
+// AckWait sets the maximum amount of time we will wait for an ack.
+type AckWait time.Duration
+
+func (ttl AckWait) configurePublish(opts *pubOpts) error {
+ opts.ttl = time.Duration(ttl)
+ return nil
+}
+
+func (ttl AckWait) configureSubscribe(opts *subOpts) error {
+ opts.cfg.AckWait = time.Duration(ttl)
+ return nil
+}
+
+func (ttl AckWait) configureAck(opts *ackOpts) error {
+ opts.ttl = time.Duration(ttl)
+ return nil
+}
+
+// ContextOpt is an option used to set a context.Context.
+type ContextOpt struct {
+ context.Context
+}
+
+func (ctx ContextOpt) configureJSContext(opts *jsOpts) error {
+ opts.ctx = ctx
+ return nil
+}
+
+func (ctx ContextOpt) configurePublish(opts *pubOpts) error {
+ opts.ctx = ctx
+ return nil
+}
+
+func (ctx ContextOpt) configureSubscribe(opts *subOpts) error {
+ opts.ctx = ctx
+ return nil
+}
+
+func (ctx ContextOpt) configurePull(opts *pullOpts) error {
+ opts.ctx = ctx
+ return nil
+}
+
+func (ctx ContextOpt) configureAck(opts *ackOpts) error {
+ opts.ctx = ctx
+ return nil
+}
+
+// Context returns an option that can be used to configure a context for APIs
+// that are context aware, such as those part of the JetStream interface.
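+// A short sketch (names are illustrative; js is a JetStreamContext):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+//	defer cancel()
+//	_, err := js.Publish("ORDERS.created", data, nats.Context(ctx))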
+func Context(ctx context.Context) ContextOpt { + return ContextOpt{ctx} +} + +type nakDelay time.Duration + +func (d nakDelay) configureAck(opts *ackOpts) error { + opts.nakDelay = time.Duration(d) + return nil +} + +// Subscribe + +// ConsumerConfig is the configuration of a JetStream consumer. +type ConsumerConfig struct { + Durable string `json:"durable_name,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + DeliverPolicy DeliverPolicy `json:"deliver_policy"` + OptStartSeq uint64 `json:"opt_start_seq,omitempty"` + OptStartTime *time.Time `json:"opt_start_time,omitempty"` + AckPolicy AckPolicy `json:"ack_policy"` + AckWait time.Duration `json:"ack_wait,omitempty"` + MaxDeliver int `json:"max_deliver,omitempty"` + BackOff []time.Duration `json:"backoff,omitempty"` + FilterSubject string `json:"filter_subject,omitempty"` + FilterSubjects []string `json:"filter_subjects,omitempty"` + ReplayPolicy ReplayPolicy `json:"replay_policy"` + RateLimit uint64 `json:"rate_limit_bps,omitempty"` // Bits per sec + SampleFrequency string `json:"sample_freq,omitempty"` + MaxWaiting int `json:"max_waiting,omitempty"` + MaxAckPending int `json:"max_ack_pending,omitempty"` + FlowControl bool `json:"flow_control,omitempty"` + Heartbeat time.Duration `json:"idle_heartbeat,omitempty"` + HeadersOnly bool `json:"headers_only,omitempty"` + + // Pull based options. + MaxRequestBatch int `json:"max_batch,omitempty"` + MaxRequestExpires time.Duration `json:"max_expires,omitempty"` + MaxRequestMaxBytes int `json:"max_bytes,omitempty"` + + // Push based consumers. + DeliverSubject string `json:"deliver_subject,omitempty"` + DeliverGroup string `json:"deliver_group,omitempty"` + + // Inactivity threshold. + InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` + + // Generally inherited by parent stream and other markers, now can be configured directly. + Replicas int `json:"num_replicas"` + // Force memory storage. + MemoryStorage bool `json:"mem_storage,omitempty"` + + // Metadata is additional metadata for the Consumer. + // Keys starting with `_nats` are reserved. + // NOTE: Metadata requires nats-server v2.10.0+ + Metadata map[string]string `json:"metadata,omitempty"` +} + +// ConsumerInfo is the info from a JetStream consumer. +type ConsumerInfo struct { + Stream string `json:"stream_name"` + Name string `json:"name"` + Created time.Time `json:"created"` + Config ConsumerConfig `json:"config"` + Delivered SequenceInfo `json:"delivered"` + AckFloor SequenceInfo `json:"ack_floor"` + NumAckPending int `json:"num_ack_pending"` + NumRedelivered int `json:"num_redelivered"` + NumWaiting int `json:"num_waiting"` + NumPending uint64 `json:"num_pending"` + Cluster *ClusterInfo `json:"cluster,omitempty"` + PushBound bool `json:"push_bound,omitempty"` +} + +// SequenceInfo has both the consumer and the stream sequence and last activity. +type SequenceInfo struct { + Consumer uint64 `json:"consumer_seq"` + Stream uint64 `json:"stream_seq"` + Last *time.Time `json:"last_active,omitempty"` +} + +// SequencePair includes the consumer and stream sequence info from a JetStream consumer. +type SequencePair struct { + Consumer uint64 `json:"consumer_seq"` + Stream uint64 `json:"stream_seq"` +} + +// nextRequest is for getting next messages for pull based consumers from JetStream. 
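+// It is marshaled as the JSON payload of a CONSUMER.MSG.NEXT request; for
+// example, a batch of 10 expiring in 30s encodes as
+// {"expires":30000000000,"batch":10} (durations are encoded in nanoseconds).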
+type nextRequest struct { + Expires time.Duration `json:"expires,omitempty"` + Batch int `json:"batch,omitempty"` + NoWait bool `json:"no_wait,omitempty"` + MaxBytes int `json:"max_bytes,omitempty"` + Heartbeat time.Duration `json:"idle_heartbeat,omitempty"` +} + +// jsSub includes JetStream subscription info. +type jsSub struct { + js *js + + // For pull subscribers, this is the next message subject to send requests to. + nms string + + psubj string // the subject that was passed by user to the subscribe calls + consumer string + stream string + deliver string + pull bool + dc bool // Delete JS consumer + ackNone bool + + // This is ConsumerInfo's Pending+Consumer.Delivered that we get from the + // add consumer response. Note that some versions of the server gather the + // consumer info *after* the creation of the consumer, which means that + // some messages may have been already delivered. So the sum of the two + // is a more accurate representation of the number of messages pending or + // in the process of being delivered to the subscription when created. + pending uint64 + + // Ordered consumers + ordered bool + dseq uint64 + sseq uint64 + ccreq *createConsumerRequest + + // Heartbeats and Flow Control handling from push consumers. + hbc *time.Timer + hbi time.Duration + active bool + cmeta string + fcr string + fcd uint64 + fciseq uint64 + csfct *time.Timer + + // Cancellation function to cancel context on drain/unsubscribe. + cancel func() +} + +// Deletes the JS Consumer. +// No connection nor subscription lock must be held on entry. +func (sub *Subscription) deleteConsumer() error { + sub.mu.Lock() + jsi := sub.jsi + if jsi == nil { + sub.mu.Unlock() + return nil + } + stream, consumer := jsi.stream, jsi.consumer + js := jsi.js + sub.mu.Unlock() + + return js.DeleteConsumer(stream, consumer) +} + +// SubOpt configures options for subscribing to JetStream consumers. +type SubOpt interface { + configureSubscribe(opts *subOpts) error +} + +// subOptFn is a function option used to configure a JetStream Subscribe. +type subOptFn func(opts *subOpts) error + +func (opt subOptFn) configureSubscribe(opts *subOpts) error { + return opt(opts) +} + +// Subscribe creates an async Subscription for JetStream. +// The stream and consumer names can be provided with the nats.Bind() option. +// For creating an ephemeral (where the consumer name is picked by the server), +// you can provide the stream name with nats.BindStream(). +// If no stream name is specified, the library will attempt to figure out which +// stream the subscription is for. See important notes below for more details. +// +// IMPORTANT NOTES: +// * If none of the options Bind() nor Durable() are specified, the library will +// send a request to the server to create an ephemeral JetStream consumer, +// which will be deleted after an Unsubscribe() or Drain(), or automatically +// by the server after a short period of time after the NATS subscription is +// gone. +// * If Durable() option is specified, the library will attempt to lookup a JetStream +// consumer with this name, and if found, will bind to it and not attempt to +// delete it. However, if not found, the library will send a request to create +// such durable JetStream consumer. The library will delete the JetStream consumer +// after an Unsubscribe() or Drain(). +// * If Bind() option is provided, the library will attempt to lookup the +// consumer with the given name, and if successful, bind to it. 
If the lookup fails, +// then the Subscribe() call will return an error. +func (js *js) Subscribe(subj string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) { + if cb == nil { + return nil, ErrBadSubscription + } + return js.subscribe(subj, _EMPTY_, cb, nil, false, false, opts) +} + +// SubscribeSync creates a Subscription that can be used to process messages synchronously. +// See important note in Subscribe() +func (js *js) SubscribeSync(subj string, opts ...SubOpt) (*Subscription, error) { + mch := make(chan *Msg, js.nc.Opts.SubChanLen) + return js.subscribe(subj, _EMPTY_, nil, mch, true, false, opts) +} + +// QueueSubscribe creates a Subscription with a queue group. +// If no optional durable name nor binding options are specified, the queue name will be used as a durable name. +// See important note in Subscribe() +func (js *js) QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) { + if cb == nil { + return nil, ErrBadSubscription + } + return js.subscribe(subj, queue, cb, nil, false, false, opts) +} + +// QueueSubscribeSync creates a Subscription with a queue group that can be used to process messages synchronously. +// See important note in QueueSubscribe() +func (js *js) QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscription, error) { + mch := make(chan *Msg, js.nc.Opts.SubChanLen) + return js.subscribe(subj, queue, nil, mch, true, false, opts) +} + +// ChanSubscribe creates channel based Subscription. +// Using ChanSubscribe without buffered capacity is not recommended since +// it will be prone to dropping messages with a slow consumer error. Make sure to give the channel enough +// capacity to handle bursts in traffic, for example other Subscribe APIs use a default of 512k capacity in comparison. +// See important note in Subscribe() +func (js *js) ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) { + return js.subscribe(subj, _EMPTY_, nil, ch, false, false, opts) +} + +// ChanQueueSubscribe creates channel based Subscription with a queue group. +// See important note in QueueSubscribe() +func (js *js) ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) { + return js.subscribe(subj, queue, nil, ch, false, false, opts) +} + +// PullSubscribe creates a Subscription that can fetch messages. +// See important note in Subscribe() +func (js *js) PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error) { + mch := make(chan *Msg, js.nc.Opts.SubChanLen) + if durable != "" { + opts = append(opts, Durable(durable)) + } + return js.subscribe(subj, _EMPTY_, nil, mch, true, true, opts) +} + +func processConsInfo(info *ConsumerInfo, userCfg *ConsumerConfig, isPullMode bool, subj, queue string) (string, error) { + ccfg := &info.Config + + // Make sure this new subject matches or is a subset. + if ccfg.FilterSubject != _EMPTY_ && subj != ccfg.FilterSubject { + return _EMPTY_, ErrSubjectMismatch + } + + // Prevent binding a subscription against incompatible consumer types. + if isPullMode && ccfg.DeliverSubject != _EMPTY_ { + return _EMPTY_, ErrPullSubscribeToPushConsumer + } else if !isPullMode && ccfg.DeliverSubject == _EMPTY_ { + return _EMPTY_, ErrPullSubscribeRequired + } + + // If pull mode, nothing else to check here. + if isPullMode { + return _EMPTY_, checkConfig(ccfg, userCfg) + } + + // At this point, we know the user wants push mode, and the JS consumer is + // really push mode. 
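+ // What remains is to validate the user's queue group against the
+ // consumer's deliver group before returning the deliver subject.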
+
+ dg := info.Config.DeliverGroup
+ if dg == _EMPTY_ {
+ // Prevent a user from attempting to create a queue subscription on
+ // a JS consumer that was not created with a deliver group.
+ if queue != _EMPTY_ {
+ return _EMPTY_, fmt.Errorf("cannot create a queue subscription for a consumer without a deliver group")
+ } else if info.PushBound {
+ // Need to reject a non queue subscription to a non queue consumer
+ // if the consumer is already bound.
+ return _EMPTY_, fmt.Errorf("consumer is already bound to a subscription")
+ }
+ } else {
+ // If the JS consumer has a deliver group, we need to fail a non queue
+ // subscription attempt:
+ if queue == _EMPTY_ {
+ return _EMPTY_, fmt.Errorf("cannot create a subscription for a consumer with a deliver group %q", dg)
+ } else if queue != dg {
+ // Here the user's queue group name does not match the one associated
+ // with the JS consumer.
+ return _EMPTY_, fmt.Errorf("cannot create a queue subscription %q for a consumer with a deliver group %q",
+ queue, dg)
+ }
+ }
+ if err := checkConfig(ccfg, userCfg); err != nil {
+ return _EMPTY_, err
+ }
+ return ccfg.DeliverSubject, nil
+}
+
+func checkConfig(s, u *ConsumerConfig) error {
+ makeErr := func(fieldName string, usrVal, srvVal any) error {
+ return fmt.Errorf("configuration requests %s to be %v, but consumer's value is %v", fieldName, usrVal, srvVal)
+ }
+
+ if u.Durable != _EMPTY_ && u.Durable != s.Durable {
+ return makeErr("durable", u.Durable, s.Durable)
+ }
+ if u.Description != _EMPTY_ && u.Description != s.Description {
+ return makeErr("description", u.Description, s.Description)
+ }
+ if u.DeliverPolicy != deliverPolicyNotSet && u.DeliverPolicy != s.DeliverPolicy {
+ return makeErr("deliver policy", u.DeliverPolicy, s.DeliverPolicy)
+ }
+ if u.OptStartSeq > 0 && u.OptStartSeq != s.OptStartSeq {
+ return makeErr("optional start sequence", u.OptStartSeq, s.OptStartSeq)
+ }
+ if u.OptStartTime != nil && !u.OptStartTime.IsZero() && !(*u.OptStartTime).Equal(*s.OptStartTime) {
+ return makeErr("optional start time", u.OptStartTime, s.OptStartTime)
+ }
+ if u.AckPolicy != ackPolicyNotSet && u.AckPolicy != s.AckPolicy {
+ return makeErr("ack policy", u.AckPolicy, s.AckPolicy)
+ }
+ if u.AckWait > 0 && u.AckWait != s.AckWait {
+ return makeErr("ack wait", u.AckWait, s.AckWait)
+ }
+ if u.MaxDeliver > 0 && u.MaxDeliver != s.MaxDeliver {
+ return makeErr("max deliver", u.MaxDeliver, s.MaxDeliver)
+ }
+ if u.ReplayPolicy != replayPolicyNotSet && u.ReplayPolicy != s.ReplayPolicy {
+ return makeErr("replay policy", u.ReplayPolicy, s.ReplayPolicy)
+ }
+ if u.RateLimit > 0 && u.RateLimit != s.RateLimit {
+ return makeErr("rate limit", u.RateLimit, s.RateLimit)
+ }
+ if u.SampleFrequency != _EMPTY_ && u.SampleFrequency != s.SampleFrequency {
+ return makeErr("sample frequency", u.SampleFrequency, s.SampleFrequency)
+ }
+ if u.MaxWaiting > 0 && u.MaxWaiting != s.MaxWaiting {
+ return makeErr("max waiting", u.MaxWaiting, s.MaxWaiting)
+ }
+ if u.MaxAckPending > 0 && u.MaxAckPending != s.MaxAckPending {
+ return makeErr("max ack pending", u.MaxAckPending, s.MaxAckPending)
+ }
+ // For flow control, we want to fail if the user explicitly wanted it, but
+ // it is not set in the existing consumer. If the user did not ask for it,
+ // the library still handles it and so no reason to fail.
+ if u.FlowControl && !s.FlowControl { + return makeErr("flow control", u.FlowControl, s.FlowControl) + } + if u.Heartbeat > 0 && u.Heartbeat != s.Heartbeat { + return makeErr("heartbeat", u.Heartbeat, s.Heartbeat) + } + if u.Replicas > 0 && u.Replicas != s.Replicas { + return makeErr("replicas", u.Replicas, s.Replicas) + } + if u.MemoryStorage && !s.MemoryStorage { + return makeErr("memory storage", u.MemoryStorage, s.MemoryStorage) + } + return nil +} + +func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync, isPullMode bool, opts []SubOpt) (*Subscription, error) { + cfg := ConsumerConfig{ + DeliverPolicy: deliverPolicyNotSet, + AckPolicy: ackPolicyNotSet, + ReplayPolicy: replayPolicyNotSet, + } + o := subOpts{cfg: &cfg} + if len(opts) > 0 { + for _, opt := range opts { + if opt == nil { + continue + } + if err := opt.configureSubscribe(&o); err != nil { + return nil, err + } + } + } + + // If no stream name is specified, the subject cannot be empty. + if subj == _EMPTY_ && o.stream == _EMPTY_ { + return nil, fmt.Errorf("nats: subject required") + } + + // Note that these may change based on the consumer info response we may get. + hasHeartbeats := o.cfg.Heartbeat > 0 + hasFC := o.cfg.FlowControl + + // Some checks for pull subscribers + if isPullMode { + // No deliver subject should be provided + if o.cfg.DeliverSubject != _EMPTY_ { + return nil, ErrPullSubscribeToPushConsumer + } + } + + // Some check/setting specific to queue subs + if queue != _EMPTY_ { + // Queue subscriber cannot have HB or FC (since messages will be randomly dispatched + // to members). We may in the future have a separate NATS subscription that all members + // would subscribe to and server would send on. + if o.cfg.Heartbeat > 0 || o.cfg.FlowControl { + // Not making this a public ErrXXX in case we allow in the future. + return nil, fmt.Errorf("nats: queue subscription doesn't support idle heartbeat nor flow control") + } + + // If this is a queue subscription and no consumer nor durable name was specified, + // then we will use the queue name as a durable name. + if o.consumer == _EMPTY_ && o.cfg.Durable == _EMPTY_ { + if err := checkConsumerName(queue); err != nil { + return nil, err + } + o.cfg.Durable = queue + } + } + + var ( + err error + shouldCreate bool + info *ConsumerInfo + deliver string + stream = o.stream + consumer = o.consumer + isDurable = o.cfg.Durable != _EMPTY_ + consumerBound = o.bound + ctx = o.ctx + skipCInfo = o.skipCInfo + notFoundErr bool + lookupErr bool + nc = js.nc + nms string + hbi time.Duration + ccreq *createConsumerRequest // In case we need to hold onto it for ordered consumers. + maxap int + ) + + // Do some quick checks here for ordered consumers. We do these here instead of spread out + // in the individual SubOpts. + if o.ordered { + // Make sure we are not durable. + if isDurable { + return nil, fmt.Errorf("nats: durable can not be set for an ordered consumer") + } + // Check ack policy. + if o.cfg.AckPolicy != ackPolicyNotSet { + return nil, fmt.Errorf("nats: ack policy can not be set for an ordered consumer") + } + // Check max deliver. + if o.cfg.MaxDeliver != 1 && o.cfg.MaxDeliver != 0 { + return nil, fmt.Errorf("nats: max deliver can not be set for an ordered consumer") + } + // No deliver subject, we pick our own. + if o.cfg.DeliverSubject != _EMPTY_ { + return nil, fmt.Errorf("nats: deliver subject can not be set for an ordered consumer") + } + // Queue groups not allowed. 
+ if queue != _EMPTY_ {
+ return nil, fmt.Errorf("nats: queues can not be set for an ordered consumer")
+ }
+ // Check for bound consumers.
+ if consumer != _EMPTY_ {
+ return nil, fmt.Errorf("nats: can not bind existing consumer for an ordered consumer")
+ }
+ // Check for pull mode.
+ if isPullMode {
+ return nil, fmt.Errorf("nats: can not use pull mode for an ordered consumer")
+ }
+ // Setup how we need it to be here.
+ o.cfg.FlowControl = true
+ o.cfg.AckPolicy = AckNonePolicy
+ o.cfg.MaxDeliver = 1
+ o.cfg.AckWait = 22 * time.Hour // Just set to something known, not utilized.
+ // Force R1 and MemoryStorage for these.
+ o.cfg.Replicas = 1
+ o.cfg.MemoryStorage = true
+
+ if !hasHeartbeats {
+ o.cfg.Heartbeat = orderedHeartbeatsInterval
+ }
+ hasFC, hasHeartbeats = true, true
+ o.mack = true // To avoid auto-ack wrapping call below.
+ hbi = o.cfg.Heartbeat
+ }
+
+ // In case a consumer has not been set explicitly, then the
+ // durable name will be used as the consumer name.
+ if consumer == _EMPTY_ {
+ consumer = o.cfg.Durable
+ }
+
+ // Find the stream mapped to the subject if not bound to a stream already.
+ if stream == _EMPTY_ {
+ stream, err = js.StreamNameBySubject(subj)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // With an explicit durable name, we can look up the consumer first
+ // to which it should attach.
+ // If SkipConsumerLookup was used, do not call consumer info.
+ if consumer != _EMPTY_ && !o.skipCInfo {
+ info, err = js.ConsumerInfo(stream, consumer)
+ notFoundErr = errors.Is(err, ErrConsumerNotFound)
+ lookupErr = err == ErrJetStreamNotEnabled || err == ErrTimeout || err == context.DeadlineExceeded
+ }
+
+ switch {
+ case info != nil:
+ deliver, err = processConsInfo(info, o.cfg, isPullMode, subj, queue)
+ if err != nil {
+ return nil, err
+ }
+ icfg := &info.Config
+ hasFC, hbi = icfg.FlowControl, icfg.Heartbeat
+ hasHeartbeats = hbi > 0
+ maxap = icfg.MaxAckPending
+ case (err != nil && !notFoundErr) || (notFoundErr && consumerBound):
+ // If the consumer is being bound and we got an error on pull subscribe then allow the error.
+ if !(isPullMode && lookupErr && consumerBound) {
+ return nil, err
+ }
+ case skipCInfo:
+ // When skipping consumer info, need to rely on the manually passed sub options
+ // to match the expected behavior from the subscription.
+ hasFC, hbi = o.cfg.FlowControl, o.cfg.Heartbeat
+ hasHeartbeats = hbi > 0
+ maxap = o.cfg.MaxAckPending
+ deliver = o.cfg.DeliverSubject
+ if consumerBound {
+ break
+ }
+
+ // When not bound to a consumer already, proceed to create.
+ fallthrough
+ default:
+ // Attempt to create consumer if not found nor using Bind.
+ shouldCreate = true
+ if o.cfg.DeliverSubject != _EMPTY_ {
+ deliver = o.cfg.DeliverSubject
+ } else if !isPullMode {
+ deliver = nc.NewInbox()
+ cfg.DeliverSubject = deliver
+ }
+ // Do filtering always, server will clear as needed.
+ cfg.FilterSubject = subj
+
+ // Pass the queue to the consumer config
+ if queue != _EMPTY_ {
+ cfg.DeliverGroup = queue
+ }
+
+ // If not set, default to deliver all
+ if cfg.DeliverPolicy == deliverPolicyNotSet {
+ cfg.DeliverPolicy = DeliverAllPolicy
+ }
+ // If not set, default to ack explicit.
+ if cfg.AckPolicy == ackPolicyNotSet { + cfg.AckPolicy = AckExplicitPolicy + } + // If not set, default to instant + if cfg.ReplayPolicy == replayPolicyNotSet { + cfg.ReplayPolicy = ReplayInstantPolicy + } + + // If we have acks at all and the MaxAckPending is not set go ahead + // and set to the internal max for channel based consumers + if cfg.MaxAckPending == 0 && ch != nil && cfg.AckPolicy != AckNonePolicy { + cfg.MaxAckPending = cap(ch) + } + // Create request here. + ccreq = &createConsumerRequest{ + Stream: stream, + Config: &cfg, + } + hbi = cfg.Heartbeat + } + + if isPullMode { + nms = fmt.Sprintf(js.apiSubj(apiRequestNextT), stream, consumer) + deliver = nc.NewInbox() + // for pull consumers, create a wildcard subscription to differentiate pull requests + deliver += ".*" + } + + // In case this has a context, then create a child context that + // is possible to cancel via unsubscribe / drain. + var cancel func() + if ctx != nil { + ctx, cancel = context.WithCancel(ctx) + } + + jsi := &jsSub{ + js: js, + stream: stream, + consumer: consumer, + deliver: deliver, + hbi: hbi, + ordered: o.ordered, + ccreq: ccreq, + dseq: 1, + pull: isPullMode, + nms: nms, + psubj: subj, + cancel: cancel, + ackNone: o.cfg.AckPolicy == AckNonePolicy, + } + + // Auto acknowledge unless manual ack is set or policy is set to AckNonePolicy + if cb != nil && !o.mack && o.cfg.AckPolicy != AckNonePolicy { + ocb := cb + cb = func(m *Msg) { ocb(m); m.Ack() } + } + sub, err := nc.subscribe(deliver, queue, cb, ch, isSync, jsi) + if err != nil { + return nil, err + } + + // If we fail and we had the sub we need to cleanup, but can't just do a straight Unsubscribe or Drain. + // We need to clear the jsi so we do not remove any durables etc. + cleanUpSub := func() { + if sub != nil { + sub.mu.Lock() + sub.jsi = nil + sub.mu.Unlock() + sub.Unsubscribe() + } + } + + // If we are creating or updating let's process that request. + consName := o.cfg.Name + if shouldCreate { + if cfg.Durable != "" { + consName = cfg.Durable + } else if consName == "" { + consName = getHash(nuid.Next()) + } + info, err := js.upsertConsumer(stream, consName, ccreq.Config) + if err != nil { + var apiErr *APIError + if ok := errors.As(err, &apiErr); !ok { + cleanUpSub() + return nil, err + } + if consumer == _EMPTY_ || + (apiErr.ErrorCode != JSErrCodeConsumerAlreadyExists && apiErr.ErrorCode != JSErrCodeConsumerNameExists) { + cleanUpSub() + if errors.Is(apiErr, ErrStreamNotFound) { + return nil, ErrStreamNotFound + } + return nil, err + } + // We will not be using this sub here if we were push based. + if !isPullMode { + cleanUpSub() + } + + info, err = js.ConsumerInfo(stream, consumer) + if err != nil { + return nil, err + } + deliver, err = processConsInfo(info, o.cfg, isPullMode, subj, queue) + if err != nil { + return nil, err + } + + if !isPullMode { + // We can't reuse the channel, so if one was passed, we need to create a new one. + if isSync { + ch = make(chan *Msg, cap(ch)) + } else if ch != nil { + // User provided (ChanSubscription), simply try to drain it. + for done := false; !done; { + select { + case <-ch: + default: + done = true + } + } + } + jsi.deliver = deliver + jsi.hbi = info.Config.Heartbeat + + // Recreate the subscription here. 
+ sub, err = nc.subscribe(jsi.deliver, queue, cb, ch, isSync, jsi)
+ if err != nil {
+ return nil, err
+ }
+ hasFC = info.Config.FlowControl
+ hasHeartbeats = info.Config.Heartbeat > 0
+ }
+ } else {
+ // Since the library created the JS consumer, it will delete it on Unsubscribe()/Drain()
+ sub.mu.Lock()
+ sub.jsi.dc = true
+ sub.jsi.pending = info.NumPending + info.Delivered.Consumer
+ // If this is an ephemeral, we did not have a consumer name, we get it from the info
+ // after the AddConsumer returns.
+ if consumer == _EMPTY_ {
+ sub.jsi.consumer = info.Name
+ if isPullMode {
+ sub.jsi.nms = fmt.Sprintf(js.apiSubj(apiRequestNextT), stream, info.Name)
+ }
+ }
+ sub.mu.Unlock()
+ }
+ // Capture max ack pending from the info response here which covers both
+ // success and failure followed by consumer lookup.
+ maxap = info.Config.MaxAckPending
+ }
+
+ // If maxap is greater than the default sub's pending limit, use that.
+ if maxap > DefaultSubPendingMsgsLimit {
+ // For the bytes limit, use maxap*1MB, but never less than DefaultSubPendingBytesLimit.
+ bl := maxap * 1024 * 1024
+ if bl < DefaultSubPendingBytesLimit {
+ bl = DefaultSubPendingBytesLimit
+ }
+ sub.SetPendingLimits(maxap, bl)
+ }
+
+ // Do heartbeats last if needed.
+ if hasHeartbeats {
+ sub.scheduleHeartbeatCheck()
+ }
+ // For ChanSubscriptions, if we know that there is flow control, we will
+ // start a go routine that evaluates the number of delivered messages
+ // and process flow control.
+ if sub.Type() == ChanSubscription && hasFC {
+ sub.chanSubcheckForFlowControlResponse()
+ }
+
+ // Wait for context to get canceled if there is one.
+ if ctx != nil {
+ go func() {
+ <-ctx.Done()
+ sub.Unsubscribe()
+ }()
+ }
+
+ return sub, nil
+}
+
+// InitialConsumerPending returns the number of messages pending to be
+// delivered to the consumer when the subscription was created.
+func (sub *Subscription) InitialConsumerPending() (uint64, error) {
+ sub.mu.Lock()
+ defer sub.mu.Unlock()
+ if sub.jsi == nil || sub.jsi.consumer == _EMPTY_ {
+ return 0, fmt.Errorf("%w: not a JetStream subscription", ErrTypeSubscription)
+ }
+ return sub.jsi.pending, nil
+}
+
+// This long-lived routine is used per ChanSubscription to check
+// on the number of delivered messages and check for flow control response.
+func (sub *Subscription) chanSubcheckForFlowControlResponse() {
+ sub.mu.Lock()
+ // We don't use defer since if we need to send an FC reply, we need
+ // to do it outside the sub's lock. So doing explicit unlock...
+ if sub.closed {
+ sub.mu.Unlock()
+ return
+ }
+ var fcReply string
+ var nc *Conn
+
+ jsi := sub.jsi
+ if jsi.csfct == nil {
+ jsi.csfct = time.AfterFunc(chanSubFCCheckInterval, sub.chanSubcheckForFlowControlResponse)
+ } else {
+ fcReply = sub.checkForFlowControlResponse()
+ nc = sub.conn
+ // Do the reset here under the lock, it's ok...
+ jsi.csfct.Reset(chanSubFCCheckInterval)
+ }
+ sub.mu.Unlock()
+ // This call will return an error (which we don't care about here)
+ // if nc is nil or fcReply is empty.
+ nc.Publish(fcReply, nil)
+}
+
+// ErrConsumerSequenceMismatch represents an error from a consumer
+// that received a Heartbeat including a sequence different from the
+// one expected from the view of the client.
+type ErrConsumerSequenceMismatch struct {
+ // StreamResumeSequence is the stream sequence from where the consumer
+ // should resume consuming from the stream.
+ StreamResumeSequence uint64
+
+ // ConsumerSequence is the sequence of the consumer that is behind.
+ ConsumerSequence uint64 + + // LastConsumerSequence is the sequence of the consumer when the heartbeat + // was received. + LastConsumerSequence uint64 +} + +func (ecs *ErrConsumerSequenceMismatch) Error() string { + return fmt.Sprintf("nats: sequence mismatch for consumer at sequence %d (%d sequences behind), should restart consumer from stream sequence %d", + ecs.ConsumerSequence, + ecs.LastConsumerSequence-ecs.ConsumerSequence, + ecs.StreamResumeSequence, + ) +} + +// isJSControlMessage will return true if this is an empty control status message +// and indicate what type of control message it is, say jsCtrlHB or jsCtrlFC +func isJSControlMessage(msg *Msg) (bool, int) { + if len(msg.Data) > 0 || msg.Header.Get(statusHdr) != controlMsg { + return false, 0 + } + val := msg.Header.Get(descrHdr) + if strings.HasPrefix(val, "Idle") { + return true, jsCtrlHB + } + if strings.HasPrefix(val, "Flow") { + return true, jsCtrlFC + } + return true, 0 +} + +// Keeps track of the incoming message's reply subject so that the consumer's +// state (deliver sequence, etc..) can be checked against heartbeats. +// We will also bump the incoming data message sequence that is used in FC cases. +// Runs under the subscription lock +func (sub *Subscription) trackSequences(reply string) { + // For flow control, keep track of incoming message sequence. + sub.jsi.fciseq++ + sub.jsi.cmeta = reply +} + +// Check to make sure messages are arriving in order. +// Returns true if the sub had to be replaced. Will cause upper layers to return. +// The caller has verified that sub.jsi != nil and that this is not a control message. +// Lock should be held. +func (sub *Subscription) checkOrderedMsgs(m *Msg) bool { + // Ignore msgs with no reply like HBs and flow control, they are handled elsewhere. + if m.Reply == _EMPTY_ { + return false + } + + // Normal message here. + tokens, err := parser.GetMetadataFields(m.Reply) + if err != nil { + return false + } + sseq, dseq := parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]), parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos]) + + jsi := sub.jsi + if dseq != jsi.dseq { + sub.resetOrderedConsumer(jsi.sseq + 1) + return true + } + // Update our tracking here. + jsi.dseq, jsi.sseq = dseq+1, sseq + return false +} + +// Update and replace sid. +// Lock should be held on entry but will be unlocked to prevent lock inversion. +func (sub *Subscription) applyNewSID() (osid int64) { + nc := sub.conn + sub.mu.Unlock() + + nc.subsMu.Lock() + osid = sub.sid + delete(nc.subs, osid) + // Place new one. + nc.ssid++ + nsid := nc.ssid + nc.subs[nsid] = sub + nc.subsMu.Unlock() + + sub.mu.Lock() + sub.sid = nsid + return osid +} + +// We are here if we have detected a gap with an ordered consumer. +// We will create a new consumer and rewire the low level subscription. +// Lock should be held. +func (sub *Subscription) resetOrderedConsumer(sseq uint64) { + nc := sub.conn + if sub.jsi == nil || nc == nil || sub.closed { + return + } + + var maxStr string + // If there was an AUTO_UNSUB done, we need to adjust the new value + // to send after the SUB for the new sid. + if sub.max > 0 { + if sub.jsi.fciseq < sub.max { + adjustedMax := sub.max - sub.jsi.fciseq + maxStr = strconv.Itoa(int(adjustedMax)) + } else { + // We are already at the max, so we should just unsub the + // existing sub and be done + go func(sid int64) { + nc.mu.Lock() + nc.bw.appendString(fmt.Sprintf(unsubProto, sid, _EMPTY_)) + nc.kickFlusher() + nc.mu.Unlock() + }(sub.sid) + return + } + } + + // Quick unsubscribe. 
Since we know this is a simple push subscriber we do it in place.
+ osid := sub.applyNewSID()
+
+ // Grab new inbox.
+ newDeliver := nc.NewInbox()
+ sub.Subject = newDeliver
+
+ // Snapshot the new sid under sub lock.
+ nsid := sub.sid
+
+ // We are still in the low level readLoop for the connection so we need
+ // to spin a go routine to try to create the new consumer.
+ go func() {
+ // Unsubscribe and subscribe with new inbox and sid.
+ // Remap a new low level sub into this sub since it's client accessible.
+ // This is done here in this go routine to prevent lock inversion.
+ nc.mu.Lock()
+ nc.bw.appendString(fmt.Sprintf(unsubProto, osid, _EMPTY_))
+ nc.bw.appendString(fmt.Sprintf(subProto, newDeliver, _EMPTY_, nsid))
+ if maxStr != _EMPTY_ {
+ nc.bw.appendString(fmt.Sprintf(unsubProto, nsid, maxStr))
+ }
+ nc.kickFlusher()
+ nc.mu.Unlock()
+
+ pushErr := func(err error) {
+ nc.handleConsumerSequenceMismatch(sub, fmt.Errorf("%w: recreating ordered consumer", err))
+ nc.unsubscribe(sub, 0, true)
+ }
+
+ sub.mu.Lock()
+ jsi := sub.jsi
+ // Reset some items in jsi.
+ jsi.dseq = 1
+ jsi.cmeta = _EMPTY_
+ jsi.fcr, jsi.fcd = _EMPTY_, 0
+ jsi.deliver = newDeliver
+ // Reset consumer request for starting policy.
+ cfg := jsi.ccreq.Config
+ cfg.DeliverSubject = newDeliver
+ cfg.DeliverPolicy = DeliverByStartSequencePolicy
+ cfg.OptStartSeq = sseq
+ // In case the consumer was created with a start time, we need to clear it
+ // since we are now using a start sequence.
+ cfg.OptStartTime = nil
+
+ js := jsi.js
+ sub.mu.Unlock()
+
+ consName := nuid.Next()
+ cinfo, err := js.upsertConsumer(jsi.stream, consName, cfg)
+ if err != nil {
+ var apiErr *APIError
+ if errors.Is(err, ErrJetStreamNotEnabled) || errors.Is(err, ErrTimeout) || errors.Is(err, context.DeadlineExceeded) {
+ // if creating consumer failed, retry
+ return
+ } else if errors.As(err, &apiErr) && apiErr.ErrorCode == JSErrCodeInsufficientResourcesErr {
+ // retry for insufficient resources, as it may mean that the client is connected to a running
+ // server in the cluster while the server hosting R1 JetStream resources is restarting
+ return
+ }
+ pushErr(err)
+ return
+ }
+
+ sub.mu.Lock()
+ jsi.consumer = cinfo.Name
+ sub.mu.Unlock()
+ }()
+}
+
+// For JetStream subscriptions, returns the number of delivered messages.
+// For ChanSubscription, this value is computed based on the known number
+// of messages added to the channel minus the current size of that channel.
+// Lock held on entry
+func (sub *Subscription) getJSDelivered() uint64 {
+ if sub.typ == ChanSubscription {
+ return sub.jsi.fciseq - uint64(len(sub.mch))
+ }
+ return sub.delivered
+}
+
+// checkForFlowControlResponse will check to see if we should send a flow control response
+// based on the subscription current delivered index and the target.
+// Runs under subscription lock
+func (sub *Subscription) checkForFlowControlResponse() string {
+ // Caller has verified that there is a sub.jsi and fc
+ jsi := sub.jsi
+ jsi.active = true
+ if sub.getJSDelivered() >= jsi.fcd {
+ fcr := jsi.fcr
+ jsi.fcr, jsi.fcd = _EMPTY_, 0
+ return fcr
+ }
+ return _EMPTY_
+}
+
+// Record an inbound flow control message.
+// Runs under subscription lock
+func (sub *Subscription) scheduleFlowControlResponse(reply string) {
+ sub.jsi.fcr, sub.jsi.fcd = reply, sub.jsi.fciseq
+}
+
+// Checks for activity from our consumer.
+// If we do not think we are active, send an async error.
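+// The timer is armed for hbi*hbcThresh (twice the idle heartbeat interval), so
+// a single late heartbeat does not trigger an error. While connected, an
+// ordered consumer is reset in place rather than surfacing ErrConsumerNotActive.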
+func (sub *Subscription) activityCheck() { + sub.mu.Lock() + jsi := sub.jsi + if jsi == nil || sub.closed { + sub.mu.Unlock() + return + } + + active := jsi.active + jsi.hbc.Reset(jsi.hbi * hbcThresh) + jsi.active = false + nc := sub.conn + sub.mu.Unlock() + + if !active { + if !jsi.ordered || nc.Status() != CONNECTED { + nc.mu.Lock() + if errCB := nc.Opts.AsyncErrorCB; errCB != nil { + nc.ach.push(func() { errCB(nc, sub, ErrConsumerNotActive) }) + } + nc.mu.Unlock() + return + } + sub.mu.Lock() + sub.resetOrderedConsumer(jsi.sseq + 1) + sub.mu.Unlock() + } +} + +// scheduleHeartbeatCheck sets up the timer check to make sure we are active +// or receiving idle heartbeats.. +func (sub *Subscription) scheduleHeartbeatCheck() { + sub.mu.Lock() + defer sub.mu.Unlock() + + jsi := sub.jsi + if jsi == nil { + return + } + + if jsi.hbc == nil { + jsi.hbc = time.AfterFunc(jsi.hbi*hbcThresh, sub.activityCheck) + } else { + jsi.hbc.Reset(jsi.hbi * hbcThresh) + } +} + +// handleConsumerSequenceMismatch will send an async error that can be used to restart a push based consumer. +func (nc *Conn) handleConsumerSequenceMismatch(sub *Subscription, err error) { + nc.mu.Lock() + errCB := nc.Opts.AsyncErrorCB + if errCB != nil { + nc.ach.push(func() { errCB(nc, sub, err) }) + } + nc.mu.Unlock() +} + +// checkForSequenceMismatch will make sure we have not missed any messages since last seen. +func (nc *Conn) checkForSequenceMismatch(msg *Msg, s *Subscription, jsi *jsSub) { + // Process heartbeat received, get latest control metadata if present. + s.mu.Lock() + ctrl, ordered := jsi.cmeta, jsi.ordered + jsi.active = true + s.mu.Unlock() + + if ctrl == _EMPTY_ { + return + } + + tokens, err := parser.GetMetadataFields(ctrl) + if err != nil { + return + } + + // Consumer sequence. + var ldseq string + dseq := tokens[parser.AckConsumerSeqTokenPos] + hdr := msg.Header[lastConsumerSeqHdr] + if len(hdr) == 1 { + ldseq = hdr[0] + } + + // Detect consumer sequence mismatch and whether + // should restart the consumer. + if ldseq != dseq { + // Dispatch async error including details such as + // from where the consumer could be restarted. + sseq := parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]) + if ordered { + s.mu.Lock() + s.resetOrderedConsumer(jsi.sseq + 1) + s.mu.Unlock() + } else { + ecs := &ErrConsumerSequenceMismatch{ + StreamResumeSequence: uint64(sseq), + ConsumerSequence: parser.ParseNum(dseq), + LastConsumerSequence: parser.ParseNum(ldseq), + } + nc.handleConsumerSequenceMismatch(s, ecs) + } + } +} + +type streamRequest struct { + Subject string `json:"subject,omitempty"` +} + +type streamNamesResponse struct { + apiResponse + apiPaged + Streams []string `json:"streams"` +} + +type subOpts struct { + // For attaching. + stream, consumer string + // For creating or updating. + cfg *ConsumerConfig + // For binding a subscription to a consumer without creating it. + bound bool + // For manual ack + mack bool + // For an ordered consumer. + ordered bool + ctx context.Context + + // To disable calling ConsumerInfo + skipCInfo bool +} + +// SkipConsumerLookup will omit looking up consumer when [Bind], [Durable] +// or [ConsumerName] are provided. +// +// NOTE: This setting may cause an existing consumer to be overwritten. Also, +// because consumer lookup is skipped, all consumer options like AckPolicy, +// DeliverSubject etc. need to be provided even if consumer already exists. 
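+//
+// A usage sketch (stream and durable names are illustrative):
+//
+//	sub, err := js.PullSubscribe("ORDERS.*", "processor",
+//		nats.BindStream("ORDERS"),
+//		nats.SkipConsumerLookup(),
+//		nats.AckExplicit())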
+func SkipConsumerLookup() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.skipCInfo = true + return nil + }) +} + +// OrderedConsumer will create a FIFO direct/ephemeral consumer for in order delivery of messages. +// There are no redeliveries and no acks, and flow control and heartbeats will be added but +// will be taken care of without additional client code. +func OrderedConsumer() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.ordered = true + return nil + }) +} + +// ManualAck disables auto ack functionality for async subscriptions. +func ManualAck() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.mack = true + return nil + }) +} + +// Description will set the description for the created consumer. +func Description(description string) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.Description = description + return nil + }) +} + +// Durable defines the consumer name for JetStream durable subscribers. +// This function will return ErrInvalidConsumerName if the name contains +// any dot ".". +func Durable(consumer string) SubOpt { + return subOptFn(func(opts *subOpts) error { + if opts.cfg.Durable != _EMPTY_ { + return fmt.Errorf("nats: option Durable set more than once") + } + if opts.consumer != _EMPTY_ && opts.consumer != consumer { + return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.consumer, consumer) + } + if err := checkConsumerName(consumer); err != nil { + return err + } + + opts.cfg.Durable = consumer + return nil + }) +} + +// DeliverAll will configure a Consumer to receive all the +// messages from a Stream. +func DeliverAll() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.DeliverPolicy = DeliverAllPolicy + return nil + }) +} + +// DeliverLast configures a Consumer to receive messages +// starting with the latest one. +func DeliverLast() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.DeliverPolicy = DeliverLastPolicy + return nil + }) +} + +// DeliverLastPerSubject configures a Consumer to receive messages +// starting with the latest one for each filtered subject. +func DeliverLastPerSubject() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.DeliverPolicy = DeliverLastPerSubjectPolicy + return nil + }) +} + +// DeliverNew configures a Consumer to receive messages +// published after the subscription. +func DeliverNew() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.DeliverPolicy = DeliverNewPolicy + return nil + }) +} + +// StartSequence configures a Consumer to receive +// messages from a start sequence. +func StartSequence(seq uint64) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.DeliverPolicy = DeliverByStartSequencePolicy + opts.cfg.OptStartSeq = seq + return nil + }) +} + +// StartTime configures a Consumer to receive +// messages from a start time. +func StartTime(startTime time.Time) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.DeliverPolicy = DeliverByStartTimePolicy + opts.cfg.OptStartTime = &startTime + return nil + }) +} + +// AckNone requires no acks for delivered messages. +func AckNone() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.AckPolicy = AckNonePolicy + return nil + }) +} + +// AckAll when acking a sequence number, this implicitly acks all sequences +// below this one as well. 
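+//
+// Illustrative sketch (assumes an initialized JetStreamContext js; subject and
+// batch size are hypothetical): with AckAll a worker can ack only the last
+// message of a processed batch, implicitly acking the ones before it:
+//
+//	sub, _ := js.SubscribeSync("ORDERS.*", nats.AckAll())
+//	var last *nats.Msg
+//	for i := 0; i < 10; i++ {
+//	    m, err := sub.NextMsg(time.Second)
+//	    if err != nil {
+//	        break
+//	    }
+//	    last = m
+//	}
+//	if last != nil {
+//	    last.Ack() // acks every delivery up to and including this one
+//	}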
+func AckAll() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.AckPolicy = AckAllPolicy + return nil + }) +} + +// AckExplicit requires ack or nack for all messages. +func AckExplicit() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.AckPolicy = AckExplicitPolicy + return nil + }) +} + +// MaxDeliver sets the number of redeliveries for a message. +func MaxDeliver(n int) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.MaxDeliver = n + return nil + }) +} + +// MaxAckPending sets the number of outstanding acks that are allowed before +// message delivery is halted. +func MaxAckPending(n int) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.MaxAckPending = n + return nil + }) +} + +// ReplayOriginal replays the messages at the original speed. +func ReplayOriginal() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.ReplayPolicy = ReplayOriginalPolicy + return nil + }) +} + +// ReplayInstant replays the messages as fast as possible. +func ReplayInstant() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.ReplayPolicy = ReplayInstantPolicy + return nil + }) +} + +// RateLimit is the Bits per sec rate limit applied to a push consumer. +func RateLimit(n uint64) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.RateLimit = n + return nil + }) +} + +// BackOff is an array of time durations that represent the time to delay based on delivery count. +func BackOff(backOff []time.Duration) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.BackOff = backOff + return nil + }) +} + +// BindStream binds a consumer to a stream explicitly based on a name. +// When a stream name is not specified, the library uses the subscribe +// subject as a way to find the stream name. It is done by making a request +// to the server to get list of stream names that have a filter for this +// subject. If the returned list contains a single stream, then this +// stream name will be used, otherwise the `ErrNoMatchingStream` is returned. +// To avoid the stream lookup, provide the stream name with this function. +// See also `Bind()`. +func BindStream(stream string) SubOpt { + return subOptFn(func(opts *subOpts) error { + if opts.stream != _EMPTY_ && opts.stream != stream { + return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream) + } + + opts.stream = stream + return nil + }) +} + +// Bind binds a subscription to an existing consumer from a stream without attempting to create. +// The first argument is the stream name and the second argument will be the consumer name. +func Bind(stream, consumer string) SubOpt { + return subOptFn(func(opts *subOpts) error { + if stream == _EMPTY_ { + return ErrStreamNameRequired + } + if consumer == _EMPTY_ { + return ErrConsumerNameRequired + } + + // In case of pull subscribers, the durable name is a required parameter + // so check that they are not different. + if opts.cfg.Durable != _EMPTY_ && opts.cfg.Durable != consumer { + return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.cfg.Durable, consumer) + } + if opts.stream != _EMPTY_ && opts.stream != stream { + return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream) + } + opts.stream = stream + opts.consumer = consumer + opts.bound = true + return nil + }) +} + +// EnableFlowControl enables flow control for a push based consumer. 
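+//
+// Hedged sketch (assumes an initialized JetStreamContext js): flow control is
+// typically combined with idle heartbeats so that a stalled flow-control
+// handshake can be detected:
+//
+//	sub, err := js.Subscribe("ORDERS.*", func(m *nats.Msg) {
+//	    m.Ack()
+//	}, nats.EnableFlowControl(), nats.IdleHeartbeat(5*time.Second))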
+func EnableFlowControl() SubOpt {
+ return subOptFn(func(opts *subOpts) error {
+ opts.cfg.FlowControl = true
+ return nil
+ })
+}
+
+// IdleHeartbeat enables push based consumers to have idle heartbeats delivered.
+// For pull consumers, idle heartbeat has to be set on each [Fetch] call.
+func IdleHeartbeat(duration time.Duration) SubOpt {
+ return subOptFn(func(opts *subOpts) error {
+ opts.cfg.Heartbeat = duration
+ return nil
+ })
+}
+
+// DeliverSubject specifies the JetStream consumer deliver subject.
+//
+// This option is used only in situations where the consumer does not exist
+// and a creation request is sent to the server. If not provided, an inbox
+// will be selected.
+// If a consumer exists, then the NATS subscription will be created on
+// the JetStream consumer's DeliverSubject, not necessarily this subject.
+func DeliverSubject(subject string) SubOpt {
+ return subOptFn(func(opts *subOpts) error {
+ opts.cfg.DeliverSubject = subject
+ return nil
+ })
+}
+
+// HeadersOnly will instruct the consumer to only deliver headers and no payloads.
+func HeadersOnly() SubOpt {
+ return subOptFn(func(opts *subOpts) error {
+ opts.cfg.HeadersOnly = true
+ return nil
+ })
+}
+
+// MaxRequestBatch sets the maximum pull consumer batch size that a Fetch()
+// can request.
+func MaxRequestBatch(max int) SubOpt {
+ return subOptFn(func(opts *subOpts) error {
+ opts.cfg.MaxRequestBatch = max
+ return nil
+ })
+}
+
+// MaxRequestExpires sets the maximum pull consumer request expiration that a
+// Fetch() can request (using the Fetch's timeout value).
+func MaxRequestExpires(max time.Duration) SubOpt {
+ return subOptFn(func(opts *subOpts) error {
+ opts.cfg.MaxRequestExpires = max
+ return nil
+ })
+}
+
+// MaxRequestMaxBytes sets the maximum pull consumer request bytes that a
+// Fetch() can receive.
+func MaxRequestMaxBytes(bytes int) SubOpt {
+ return subOptFn(func(opts *subOpts) error {
+ opts.cfg.MaxRequestMaxBytes = bytes
+ return nil
+ })
+}
+
+// InactiveThreshold indicates how long the server should keep a consumer
+// after detecting a lack of activity. In NATS Server 2.8.4 and earlier, this
+// option only applies to ephemeral consumers. In NATS Server 2.9.0 and later,
+// this option applies to both ephemeral and durable consumers, allowing durable
+// consumers to also be deleted automatically after the inactivity threshold has
+// passed.
+func InactiveThreshold(threshold time.Duration) SubOpt {
+ return subOptFn(func(opts *subOpts) error {
+ if threshold < 0 {
+ return fmt.Errorf("invalid InactiveThreshold value (%v), needs to be greater or equal to 0", threshold)
+ }
+ opts.cfg.InactiveThreshold = threshold
+ return nil
+ })
+}
+
+// ConsumerReplicas sets the replica count for a consumer.
+func ConsumerReplicas(replicas int) SubOpt {
+ return subOptFn(func(opts *subOpts) error {
+ if replicas < 1 {
+ return fmt.Errorf("invalid ConsumerReplicas value (%v), needs to be greater than 0", replicas)
+ }
+ opts.cfg.Replicas = replicas
+ return nil
+ })
+}
+
+// ConsumerMemoryStorage enables memory storage for a consumer.
+func ConsumerMemoryStorage() SubOpt {
+ return subOptFn(func(opts *subOpts) error {
+ opts.cfg.MemoryStorage = true
+ return nil
+ })
+}
+
+// ConsumerName sets the name for a consumer.
+func ConsumerName(name string) SubOpt {
+ return subOptFn(func(opts *subOpts) error {
+ opts.cfg.Name = name
+ return nil
+ })
+}
+
+// ConsumerFilterSubjects can be used to set multiple subject filters on the consumer.
+// It has to be used in conjunction with [nats.BindStream] and +// with empty 'subject' parameter. +func ConsumerFilterSubjects(subjects ...string) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.FilterSubjects = subjects + return nil + }) +} + +func (sub *Subscription) ConsumerInfo() (*ConsumerInfo, error) { + sub.mu.Lock() + // TODO(dlc) - Better way to mark especially if we attach. + if sub.jsi == nil || sub.jsi.consumer == _EMPTY_ { + sub.mu.Unlock() + return nil, ErrTypeSubscription + } + + // Consumer info lookup should fail if in direct mode. + js := sub.jsi.js + stream, consumer := sub.jsi.stream, sub.jsi.consumer + sub.mu.Unlock() + + return js.getConsumerInfo(stream, consumer) +} + +type pullOpts struct { + maxBytes int + ttl time.Duration + ctx context.Context + hb time.Duration +} + +// PullOpt are the options that can be passed when pulling a batch of messages. +type PullOpt interface { + configurePull(opts *pullOpts) error +} + +// PullMaxWaiting defines the max inflight pull requests. +func PullMaxWaiting(n int) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.MaxWaiting = n + return nil + }) +} + +type PullHeartbeat time.Duration + +func (h PullHeartbeat) configurePull(opts *pullOpts) error { + if h <= 0 { + return fmt.Errorf("%w: idle heartbeat has to be greater than 0", ErrInvalidArg) + } + opts.hb = time.Duration(h) + return nil +} + +// PullMaxBytes defines the max bytes allowed for a fetch request. +type PullMaxBytes int + +func (n PullMaxBytes) configurePull(opts *pullOpts) error { + opts.maxBytes = int(n) + return nil +} + +var ( + // errNoMessages is an error that a Fetch request using no_wait can receive to signal + // that there are no more messages available. + errNoMessages = errors.New("nats: no messages") + + // errRequestsPending is an error that represents a sub.Fetch requests that was using + // no_wait and expires time got discarded by the server. + errRequestsPending = errors.New("nats: requests pending") +) + +// Returns if the given message is a user message or not, and if +// `checkSts` is true, returns appropriate error based on the +// content of the status (404, etc..) +func checkMsg(msg *Msg, checkSts, isNoWait bool) (usrMsg bool, err error) { + // Assume user message + usrMsg = true + + // If payload or no header, consider this a user message + if len(msg.Data) > 0 || len(msg.Header) == 0 { + return + } + // Look for status header + val := msg.Header.Get(statusHdr) + // If not present, then this is considered a user message + if val == _EMPTY_ { + return + } + // At this point, this is not a user message since there is + // no payload and a "Status" header. + usrMsg = false + + // If we don't care about status, we are done. + if !checkSts { + return + } + + // if it's a heartbeat message, report as not user msg + if isHb, _ := isJSControlMessage(msg); isHb { + return + } + switch val { + case noResponders: + err = ErrNoResponders + case noMessagesSts: + // 404 indicates that there are no messages. + err = errNoMessages + case reqTimeoutSts: + // In case of a fetch request with no wait request and expires time, + // need to skip 408 errors and retry. + if isNoWait { + err = errRequestsPending + } else { + // Older servers may send a 408 when a request in the server was expired + // and interest is still found, which will be the case for our + // implementation. Regardless, ignore 408 errors until receiving at least + // one message when making requests without no_wait. 
+ err = ErrTimeout + } + case jetStream409Sts: + if strings.Contains(strings.ToLower(msg.Header.Get(descrHdr)), "consumer deleted") { + err = ErrConsumerDeleted + break + } + + if strings.Contains(strings.ToLower(msg.Header.Get(descrHdr)), "leadership change") { + err = ErrConsumerLeadershipChanged + break + } + fallthrough + default: + err = fmt.Errorf("nats: %s", msg.Header.Get(descrHdr)) + } + return +} + +// Fetch pulls a batch of messages from a stream for a pull consumer. +func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) { + if sub == nil { + return nil, ErrBadSubscription + } + if batch < 1 { + return nil, ErrInvalidArg + } + + var o pullOpts + for _, opt := range opts { + if err := opt.configurePull(&o); err != nil { + return nil, err + } + } + if o.ctx != nil && o.ttl != 0 { + return nil, ErrContextAndTimeout + } + + sub.mu.Lock() + jsi := sub.jsi + // Reject if this is not a pull subscription. Note that sub.typ is SyncSubscription, + // so check for jsi.pull boolean instead. + if jsi == nil || !jsi.pull { + sub.mu.Unlock() + return nil, ErrTypeSubscription + } + + nc := sub.conn + nms := sub.jsi.nms + rply, _ := newFetchInbox(jsi.deliver) + js := sub.jsi.js + pmc := len(sub.mch) > 0 + + // All fetch requests have an expiration, in case of no explicit expiration + // then the default timeout of the JetStream context is used. + ttl := o.ttl + if ttl == 0 { + ttl = js.opts.wait + } + sub.mu.Unlock() + + // Use the given context or setup a default one for the span + // of the pull batch request. + var ( + ctx = o.ctx + err error + cancel context.CancelFunc + ) + if ctx == nil { + ctx, cancel = context.WithTimeout(context.Background(), ttl) + } else if _, hasDeadline := ctx.Deadline(); !hasDeadline { + // Prevent from passing the background context which will just block + // and cannot be canceled either. + if octx, ok := ctx.(ContextOpt); ok && octx.Context == context.Background() { + return nil, ErrNoDeadlineContext + } + + // If the context did not have a deadline, then create a new child context + // that will use the default timeout from the JS context. + ctx, cancel = context.WithTimeout(ctx, ttl) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + + // if heartbeat is set, validate it against the context timeout + if o.hb > 0 { + deadline, _ := ctx.Deadline() + if 2*o.hb >= time.Until(deadline) { + return nil, fmt.Errorf("%w: idle heartbeat value too large", ErrInvalidArg) + } + } + + // Check if context not done already before making the request. + select { + case <-ctx.Done(): + if o.ctx != nil { // Timeout or Cancel triggered by context object option + err = ctx.Err() + } else { // Timeout triggered by timeout option + err = ErrTimeout + } + default: + } + if err != nil { + return nil, err + } + + var ( + msgs = make([]*Msg, 0, batch) + msg *Msg + ) + for pmc && len(msgs) < batch { + // Check next msg with booleans that say that this is an internal call + // for a pull subscribe (so don't reject it) and don't wait if there + // are no messages. + msg, err = sub.nextMsgWithContext(ctx, true, false) + if err != nil { + if err == errNoMessages { + err = nil + } + break + } + // Check msg but just to determine if this is a user message + // or status message, however, we don't care about values of status + // messages at this point in the Fetch() call, so checkMsg can't + // return an error. 
+ if usrMsg, _ := checkMsg(msg, false, false); usrMsg { + msgs = append(msgs, msg) + } + } + var hbTimer *time.Timer + var hbErr error + if err == nil && len(msgs) < batch { + // For batch real size of 1, it does not make sense to set no_wait in + // the request. + noWait := batch-len(msgs) > 1 + + var nr nextRequest + + sendReq := func() error { + // The current deadline for the context will be used + // to set the expires TTL for a fetch request. + deadline, _ := ctx.Deadline() + ttl = time.Until(deadline) + + // Check if context has already been canceled or expired. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Make our request expiration a bit shorter than the current timeout. + expires := ttl + if ttl >= 20*time.Millisecond { + expires = ttl - 10*time.Millisecond + } + + nr.Batch = batch - len(msgs) + nr.Expires = expires + nr.NoWait = noWait + nr.MaxBytes = o.maxBytes + if 2*o.hb < expires { + nr.Heartbeat = o.hb + } else { + nr.Heartbeat = 0 + } + req, _ := json.Marshal(nr) + if err := nc.PublishRequest(nms, rply, req); err != nil { + return err + } + if o.hb > 0 { + if hbTimer == nil { + hbTimer = time.AfterFunc(2*o.hb, func() { + hbErr = ErrNoHeartbeat + cancel() + }) + } else { + hbTimer.Reset(2 * o.hb) + } + } + return nil + } + + err = sendReq() + for err == nil && len(msgs) < batch { + // Ask for next message and wait if there are no messages + msg, err = sub.nextMsgWithContext(ctx, true, true) + if err == nil { + if hbTimer != nil { + hbTimer.Reset(2 * o.hb) + } + var usrMsg bool + + usrMsg, err = checkMsg(msg, true, noWait) + if err == nil && usrMsg { + msgs = append(msgs, msg) + } else if noWait && (err == errNoMessages || err == errRequestsPending) && len(msgs) == 0 { + // If we have a 404/408 for our "no_wait" request and have + // not collected any message, then resend request to + // wait this time. + noWait = false + err = sendReq() + } else if err == ErrTimeout && len(msgs) == 0 { + // If we get a 408, we will bail if we already collected some + // messages, otherwise ignore and go back calling nextMsg. + err = nil + } + } + } + if hbTimer != nil { + hbTimer.Stop() + } + } + // If there is at least a message added to msgs, then need to return OK and no error + if err != nil && len(msgs) == 0 { + if hbErr != nil { + return nil, hbErr + } + return nil, o.checkCtxErr(err) + } + return msgs, nil +} + +// newFetchInbox returns subject used as reply subject when sending pull requests +// as well as request ID. For non-wildcard subject, request ID is empty and +// passed subject is not transformed +func newFetchInbox(subj string) (string, string) { + if !strings.HasSuffix(subj, ".*") { + return subj, "" + } + reqID := nuid.Next() + var sb strings.Builder + sb.WriteString(subj[:len(subj)-1]) + sb.WriteString(reqID) + return sb.String(), reqID +} + +func subjectMatchesReqID(subject, reqID string) bool { + subjectParts := strings.Split(subject, ".") + if len(subjectParts) < 2 { + return false + } + return subjectParts[len(subjectParts)-1] == reqID +} + +// MessageBatch provides methods to retrieve messages consumed using [Subscribe.FetchBatch]. +type MessageBatch interface { + // Messages returns a channel on which messages will be published. + Messages() <-chan *Msg + + // Error returns an error encountered when fetching messages. + Error() error + + // Done signals end of execution. 
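+ //
+ // A hedged end-to-end sketch for consuming a batch (assumes a pull
+ // Subscription sub; the batch size is hypothetical):
+ //
+ //	batch, err := sub.FetchBatch(100)
+ //	if err != nil {
+ //	    return err
+ //	}
+ //	for msg := range batch.Messages() {
+ //	    msg.Ack()
+ //	}
+ //	if err := batch.Error(); err != nil {
+ //	    // the request failed part-way through
+ //	}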
+ Done() <-chan struct{} +} + +type messageBatch struct { + msgs chan *Msg + err error + done chan struct{} +} + +func (mb *messageBatch) Messages() <-chan *Msg { + return mb.msgs +} + +func (mb *messageBatch) Error() error { + return mb.err +} + +func (mb *messageBatch) Done() <-chan struct{} { + return mb.done +} + +// FetchBatch pulls a batch of messages from a stream for a pull consumer. +// Unlike [Subscription.Fetch], it is non blocking and returns [MessageBatch], +// allowing to retrieve incoming messages from a channel. +// The returned channel is always closed after all messages for a batch have been +// delivered by the server - it is safe to iterate over it using range. +// +// To avoid using default JetStream timeout as fetch expiry time, use [nats.MaxWait] +// or [nats.Context] (with deadline set). +// +// This method will not return error in case of pull request expiry (even if there are no messages). +// Any other error encountered when receiving messages will cause FetchBatch to stop receiving new messages. +func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, error) { + if sub == nil { + return nil, ErrBadSubscription + } + if batch < 1 { + return nil, ErrInvalidArg + } + + var o pullOpts + for _, opt := range opts { + if err := opt.configurePull(&o); err != nil { + return nil, err + } + } + if o.ctx != nil && o.ttl != 0 { + return nil, ErrContextAndTimeout + } + sub.mu.Lock() + jsi := sub.jsi + // Reject if this is not a pull subscription. Note that sub.typ is SyncSubscription, + // so check for jsi.pull boolean instead. + if jsi == nil || !jsi.pull { + sub.mu.Unlock() + return nil, ErrTypeSubscription + } + + nc := sub.conn + nms := sub.jsi.nms + rply, reqID := newFetchInbox(sub.jsi.deliver) + js := sub.jsi.js + pmc := len(sub.mch) > 0 + + // All fetch requests have an expiration, in case of no explicit expiration + // then the default timeout of the JetStream context is used. + ttl := o.ttl + if ttl == 0 { + ttl = js.opts.wait + } + sub.mu.Unlock() + + // Use the given context or setup a default one for the span + // of the pull batch request. + var ( + ctx = o.ctx + cancel context.CancelFunc + cancelContext = true + ) + if ctx == nil { + ctx, cancel = context.WithTimeout(context.Background(), ttl) + } else if _, hasDeadline := ctx.Deadline(); !hasDeadline { + // Prevent from passing the background context which will just block + // and cannot be canceled either. + if octx, ok := ctx.(ContextOpt); ok && octx.Context == context.Background() { + return nil, ErrNoDeadlineContext + } + + // If the context did not have a deadline, then create a new child context + // that will use the default timeout from the JS context. + ctx, cancel = context.WithTimeout(ctx, ttl) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + // only cancel the context here if we are sure the fetching goroutine has not been started yet + if cancelContext { + cancel() + } + }() + + // if heartbeat is set, validate it against the context timeout + if o.hb > 0 { + deadline, _ := ctx.Deadline() + if 2*o.hb >= time.Until(deadline) { + return nil, fmt.Errorf("%w: idle heartbeat value too large", ErrInvalidArg) + } + } + + // Check if context not done already before making the request. 
+ select { + case <-ctx.Done(): + if o.ctx != nil { // Timeout or Cancel triggered by context object option + return nil, ctx.Err() + } else { // Timeout triggered by timeout option + return nil, ErrTimeout + } + default: + } + + result := &messageBatch{ + msgs: make(chan *Msg, batch), + done: make(chan struct{}, 1), + } + var msg *Msg + for pmc && len(result.msgs) < batch { + // Check next msg with booleans that say that this is an internal call + // for a pull subscribe (so don't reject it) and don't wait if there + // are no messages. + msg, err := sub.nextMsgWithContext(ctx, true, false) + if err != nil { + if err == errNoMessages { + err = nil + } + result.err = err + break + } + // Check msg but just to determine if this is a user message + // or status message, however, we don't care about values of status + // messages at this point in the Fetch() call, so checkMsg can't + // return an error. + if usrMsg, _ := checkMsg(msg, false, false); usrMsg { + result.msgs <- msg + } + } + if len(result.msgs) == batch || result.err != nil { + close(result.msgs) + result.done <- struct{}{} + return result, nil + } + + deadline, _ := ctx.Deadline() + ttl = time.Until(deadline) + + // Make our request expiration a bit shorter than the current timeout. + expires := ttl + if ttl >= 20*time.Millisecond { + expires = ttl - 10*time.Millisecond + } + + requestBatch := batch - len(result.msgs) + req := nextRequest{ + Expires: expires, + Batch: requestBatch, + MaxBytes: o.maxBytes, + Heartbeat: o.hb, + } + reqJSON, err := json.Marshal(req) + if err != nil { + close(result.msgs) + result.done <- struct{}{} + result.err = err + return result, nil + } + if err := nc.PublishRequest(nms, rply, reqJSON); err != nil { + if len(result.msgs) == 0 { + return nil, err + } + close(result.msgs) + result.done <- struct{}{} + result.err = err + return result, nil + } + var hbTimer *time.Timer + var hbErr error + if o.hb > 0 { + hbTimer = time.AfterFunc(2*o.hb, func() { + hbErr = ErrNoHeartbeat + cancel() + }) + } + cancelContext = false + go func() { + defer cancel() + var requestMsgs int + for requestMsgs < requestBatch { + // Ask for next message and wait if there are no messages + msg, err = sub.nextMsgWithContext(ctx, true, true) + if err != nil { + break + } + if hbTimer != nil { + hbTimer.Reset(2 * o.hb) + } + var usrMsg bool + + usrMsg, err = checkMsg(msg, true, false) + if err != nil { + if err == ErrTimeout { + if reqID != "" && !subjectMatchesReqID(msg.Subject, reqID) { + // ignore timeout message from server if it comes from a different pull request + continue + } + err = nil + } + break + } + if usrMsg { + result.msgs <- msg + requestMsgs++ + } + } + if err != nil { + if hbErr != nil { + result.err = hbErr + } else { + result.err = o.checkCtxErr(err) + } + } + close(result.msgs) + result.done <- struct{}{} + }() + return result, nil +} + +// checkCtxErr is used to determine whether ErrTimeout should be returned in case of context timeout +func (o *pullOpts) checkCtxErr(err error) error { + if o.ctx == nil && err == context.DeadlineExceeded { + return ErrTimeout + } + return err +} + +func (js *js) getConsumerInfo(stream, consumer string) (*ConsumerInfo, error) { + ctx, cancel := context.WithTimeout(context.Background(), js.opts.wait) + defer cancel() + return js.getConsumerInfoContext(ctx, stream, consumer) +} + +func (js *js) getConsumerInfoContext(ctx context.Context, stream, consumer string) (*ConsumerInfo, error) { + ccInfoSubj := fmt.Sprintf(apiConsumerInfoT, stream, consumer) + resp, err := 
js.apiRequestWithContext(ctx, js.apiSubj(ccInfoSubj), nil)
+ if err != nil {
+ if err == ErrNoResponders {
+ err = ErrJetStreamNotEnabled
+ }
+ return nil, err
+ }
+
+ var info consumerResponse
+ if err := json.Unmarshal(resp.Data, &info); err != nil {
+ return nil, err
+ }
+ if info.Error != nil {
+ if errors.Is(info.Error, ErrConsumerNotFound) {
+ return nil, ErrConsumerNotFound
+ }
+ if errors.Is(info.Error, ErrStreamNotFound) {
+ return nil, ErrStreamNotFound
+ }
+ return nil, info.Error
+ }
+ if info.Error == nil && info.ConsumerInfo == nil {
+ return nil, ErrConsumerNotFound
+ }
+ return info.ConsumerInfo, nil
+}
+
+// apiRequestWithContext performs a RequestWithContext with tracing via TraceCB.
+func (js *js) apiRequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) {
+ if js.opts.shouldTrace {
+ ctrace := js.opts.ctrace
+ if ctrace.RequestSent != nil {
+ ctrace.RequestSent(subj, data)
+ }
+ }
+ resp, err := js.nc.RequestWithContext(ctx, subj, data)
+ if err != nil {
+ return nil, err
+ }
+ if js.opts.shouldTrace {
+ ctrace := js.opts.ctrace
+ if ctrace.ResponseReceived != nil {
+ ctrace.ResponseReceived(subj, resp.Data, resp.Header)
+ }
+ }
+
+ return resp, nil
+}
+
+func (m *Msg) checkReply() error {
+ if m == nil || m.Sub == nil {
+ return ErrMsgNotBound
+ }
+ if m.Reply == _EMPTY_ {
+ return ErrMsgNoReply
+ }
+ return nil
+}
+
+// ackReply handles all acks. Will do the right thing for pull and sync mode.
+// It ensures that an ack is only sent a single time, regardless of
+// how many times it is being called to avoid duplicated acks.
+func (m *Msg) ackReply(ackType []byte, sync bool, opts ...AckOpt) error {
+ var o ackOpts
+ for _, opt := range opts {
+ if err := opt.configureAck(&o); err != nil {
+ return err
+ }
+ }
+
+ if err := m.checkReply(); err != nil {
+ return err
+ }
+
+ var ackNone bool
+ var js *js
+
+ sub := m.Sub
+ sub.mu.Lock()
+ nc := sub.conn
+ if jsi := sub.jsi; jsi != nil {
+ js = jsi.js
+ ackNone = jsi.ackNone
+ }
+ sub.mu.Unlock()
+
+ // Skip if already acked.
+ if atomic.LoadUint32(&m.ackd) == 1 {
+ return ErrMsgAlreadyAckd
+ }
+ if ackNone {
+ return ErrCantAckIfConsumerAckNone
+ }
+
+ usesCtx := o.ctx != nil
+ usesWait := o.ttl > 0
+
+ // Only allow either AckWait or Context option to set the timeout.
+ if usesWait && usesCtx {
+ return ErrContextAndTimeout
+ }
+
+ sync = sync || usesCtx || usesWait
+ ctx := o.ctx
+ wait := defaultRequestWait
+ if usesWait {
+ wait = o.ttl
+ } else if js != nil {
+ wait = js.opts.wait
+ }
+
+ var body []byte
+ var err error
+ // This will be > 0 only when called from NakWithDelay()
+ if o.nakDelay > 0 {
+ body = []byte(fmt.Sprintf("%s {\"delay\": %d}", ackType, o.nakDelay.Nanoseconds()))
+ } else {
+ body = ackType
+ }
+
+ if sync {
+ if usesCtx {
+ _, err = nc.RequestWithContext(ctx, m.Reply, body)
+ } else {
+ _, err = nc.Request(m.Reply, body, wait)
+ }
+ } else {
+ err = nc.Publish(m.Reply, body)
+ }
+
+ // Mark that the message has been acked unless it is ackProgress
+ // which can be sent many times.
+ if err == nil && !bytes.Equal(ackType, ackProgress) {
+ atomic.StoreUint32(&m.ackd, 1)
+ }
+
+ return err
+}
+
+// Ack acknowledges a message. This tells the server that the message was
+// successfully processed and it can move on to the next message.
+func (m *Msg) Ack(opts ...AckOpt) error {
+ return m.ackReply(ackAck, false, opts...)
+}
+
+// AckSync is the synchronous version of Ack. This indicates successful message
+// processing.
+func (m *Msg) AckSync(opts ...AckOpt) error {
+ return m.ackReply(ackAck, true, opts...)
+}
+
+// Nak negatively acknowledges a message. This tells the server to redeliver
+// the message. You can configure the number of redeliveries by passing
+// nats.MaxDeliver when you Subscribe. The default is infinite redeliveries.
+func (m *Msg) Nak(opts ...AckOpt) error {
+ return m.ackReply(ackNak, false, opts...)
+}
+
+// NakWithDelay negatively acknowledges a message. This tells the server to
+// redeliver the message after the given `delay` duration. You can configure
+// the number of redeliveries by passing nats.MaxDeliver when you Subscribe.
+// The default is infinite redeliveries.
+func (m *Msg) NakWithDelay(delay time.Duration, opts ...AckOpt) error {
+ if delay > 0 {
+ opts = append(opts, nakDelay(delay))
+ }
+ return m.ackReply(ackNak, false, opts...)
+}
+
+// Term tells the server to not redeliver this message, regardless of the value
+// of nats.MaxDeliver.
+func (m *Msg) Term(opts ...AckOpt) error {
+ return m.ackReply(ackTerm, false, opts...)
+}
+
+// InProgress tells the server that this message is being worked on. It resets
+// the redelivery timer on the server.
+func (m *Msg) InProgress(opts ...AckOpt) error {
+ return m.ackReply(ackProgress, false, opts...)
+}
+
+// MsgMetadata is the JetStream metadata associated with received messages.
+type MsgMetadata struct {
+ Sequence SequencePair
+ NumDelivered uint64
+ NumPending uint64
+ Timestamp time.Time
+ Stream string
+ Consumer string
+ Domain string
+}
+
+// Metadata retrieves the metadata from a JetStream message. This method will
+// return an error for non-JetStream Msgs.
+func (m *Msg) Metadata() (*MsgMetadata, error) {
+ if err := m.checkReply(); err != nil {
+ return nil, err
+ }
+
+ tokens, err := parser.GetMetadataFields(m.Reply)
+ if err != nil {
+ return nil, err
+ }
+
+ meta := &MsgMetadata{
+ Domain: tokens[parser.AckDomainTokenPos],
+ NumDelivered: parser.ParseNum(tokens[parser.AckNumDeliveredTokenPos]),
+ NumPending: parser.ParseNum(tokens[parser.AckNumPendingTokenPos]),
+ Timestamp: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))),
+ Stream: tokens[parser.AckStreamTokenPos],
+ Consumer: tokens[parser.AckConsumerTokenPos],
+ }
+ meta.Sequence.Stream = parser.ParseNum(tokens[parser.AckStreamSeqTokenPos])
+ meta.Sequence.Consumer = parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos])
+ return meta, nil
+}
+
+// AckPolicy determines how the consumer should acknowledge delivered messages.
+type AckPolicy int
+
+const (
+ // AckNonePolicy requires no acks for delivered messages.
+ AckNonePolicy AckPolicy = iota
+
+ // AckAllPolicy when acking a sequence number, this implicitly acks all
+ // sequences below this one as well.
+ AckAllPolicy
+
+ // AckExplicitPolicy requires ack or nack for all messages.
+ AckExplicitPolicy + + // For configuration mismatch check + ackPolicyNotSet = 99 +) + +func jsonString(s string) string { + return "\"" + s + "\"" +} + +func (p *AckPolicy) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString("none"): + *p = AckNonePolicy + case jsonString("all"): + *p = AckAllPolicy + case jsonString("explicit"): + *p = AckExplicitPolicy + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + + return nil +} + +func (p AckPolicy) MarshalJSON() ([]byte, error) { + switch p { + case AckNonePolicy: + return json.Marshal("none") + case AckAllPolicy: + return json.Marshal("all") + case AckExplicitPolicy: + return json.Marshal("explicit") + default: + return nil, fmt.Errorf("nats: unknown acknowledgement policy %v", p) + } +} + +func (p AckPolicy) String() string { + switch p { + case AckNonePolicy: + return "AckNone" + case AckAllPolicy: + return "AckAll" + case AckExplicitPolicy: + return "AckExplicit" + case ackPolicyNotSet: + return "Not Initialized" + default: + return "Unknown AckPolicy" + } +} + +// ReplayPolicy determines how the consumer should replay messages it already has queued in the stream. +type ReplayPolicy int + +const ( + // ReplayInstantPolicy will replay messages as fast as possible. + ReplayInstantPolicy ReplayPolicy = iota + + // ReplayOriginalPolicy will maintain the same timing as the messages were received. + ReplayOriginalPolicy + + // For configuration mismatch check + replayPolicyNotSet = 99 +) + +func (p *ReplayPolicy) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString("instant"): + *p = ReplayInstantPolicy + case jsonString("original"): + *p = ReplayOriginalPolicy + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + + return nil +} + +func (p ReplayPolicy) MarshalJSON() ([]byte, error) { + switch p { + case ReplayOriginalPolicy: + return json.Marshal("original") + case ReplayInstantPolicy: + return json.Marshal("instant") + default: + return nil, fmt.Errorf("nats: unknown replay policy %v", p) + } +} + +var ( + ackAck = []byte("+ACK") + ackNak = []byte("-NAK") + ackProgress = []byte("+WPI") + ackTerm = []byte("+TERM") +) + +// DeliverPolicy determines how the consumer should select the first message to deliver. +type DeliverPolicy int + +const ( + // DeliverAllPolicy starts delivering messages from the very beginning of a + // stream. This is the default. + DeliverAllPolicy DeliverPolicy = iota + + // DeliverLastPolicy will start the consumer with the last sequence + // received. + DeliverLastPolicy + + // DeliverNewPolicy will only deliver new messages that are sent after the + // consumer is created. + DeliverNewPolicy + + // DeliverByStartSequencePolicy will deliver messages starting from a given + // sequence. + DeliverByStartSequencePolicy + + // DeliverByStartTimePolicy will deliver messages starting from a given + // time. + DeliverByStartTimePolicy + + // DeliverLastPerSubjectPolicy will start the consumer with the last message + // for all subjects received. 
+ DeliverLastPerSubjectPolicy + + // For configuration mismatch check + deliverPolicyNotSet = 99 +) + +func (p *DeliverPolicy) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString("all"), jsonString("undefined"): + *p = DeliverAllPolicy + case jsonString("last"): + *p = DeliverLastPolicy + case jsonString("new"): + *p = DeliverNewPolicy + case jsonString("by_start_sequence"): + *p = DeliverByStartSequencePolicy + case jsonString("by_start_time"): + *p = DeliverByStartTimePolicy + case jsonString("last_per_subject"): + *p = DeliverLastPerSubjectPolicy + } + + return nil +} + +func (p DeliverPolicy) MarshalJSON() ([]byte, error) { + switch p { + case DeliverAllPolicy: + return json.Marshal("all") + case DeliverLastPolicy: + return json.Marshal("last") + case DeliverNewPolicy: + return json.Marshal("new") + case DeliverByStartSequencePolicy: + return json.Marshal("by_start_sequence") + case DeliverByStartTimePolicy: + return json.Marshal("by_start_time") + case DeliverLastPerSubjectPolicy: + return json.Marshal("last_per_subject") + default: + return nil, fmt.Errorf("nats: unknown deliver policy %v", p) + } +} + +// RetentionPolicy determines how messages in a set are retained. +type RetentionPolicy int + +const ( + // LimitsPolicy (default) means that messages are retained until any given limit is reached. + // This could be one of MaxMsgs, MaxBytes, or MaxAge. + LimitsPolicy RetentionPolicy = iota + // InterestPolicy specifies that when all known observables have acknowledged a message it can be removed. + InterestPolicy + // WorkQueuePolicy specifies that when the first worker or subscriber acknowledges the message it can be removed. + WorkQueuePolicy +) + +// DiscardPolicy determines how to proceed when limits of messages or bytes are +// reached. +type DiscardPolicy int + +const ( + // DiscardOld will remove older messages to return to the limits. This is + // the default. + DiscardOld DiscardPolicy = iota + //DiscardNew will fail to store new messages. 
+ DiscardNew +) + +const ( + limitsPolicyString = "limits" + interestPolicyString = "interest" + workQueuePolicyString = "workqueue" +) + +func (rp RetentionPolicy) String() string { + switch rp { + case LimitsPolicy: + return "Limits" + case InterestPolicy: + return "Interest" + case WorkQueuePolicy: + return "WorkQueue" + default: + return "Unknown Retention Policy" + } +} + +func (rp RetentionPolicy) MarshalJSON() ([]byte, error) { + switch rp { + case LimitsPolicy: + return json.Marshal(limitsPolicyString) + case InterestPolicy: + return json.Marshal(interestPolicyString) + case WorkQueuePolicy: + return json.Marshal(workQueuePolicyString) + default: + return nil, fmt.Errorf("nats: can not marshal %v", rp) + } +} + +func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString(limitsPolicyString): + *rp = LimitsPolicy + case jsonString(interestPolicyString): + *rp = InterestPolicy + case jsonString(workQueuePolicyString): + *rp = WorkQueuePolicy + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + return nil +} + +func (dp DiscardPolicy) String() string { + switch dp { + case DiscardOld: + return "DiscardOld" + case DiscardNew: + return "DiscardNew" + default: + return "Unknown Discard Policy" + } +} + +func (dp DiscardPolicy) MarshalJSON() ([]byte, error) { + switch dp { + case DiscardOld: + return json.Marshal("old") + case DiscardNew: + return json.Marshal("new") + default: + return nil, fmt.Errorf("nats: can not marshal %v", dp) + } +} + +func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error { + switch strings.ToLower(string(data)) { + case jsonString("old"): + *dp = DiscardOld + case jsonString("new"): + *dp = DiscardNew + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + return nil +} + +// StorageType determines how messages are stored for retention. +type StorageType int + +const ( + // FileStorage specifies on disk storage. It's the default. + FileStorage StorageType = iota + // MemoryStorage specifies in memory only. 
+ MemoryStorage +) + +const ( + memoryStorageString = "memory" + fileStorageString = "file" +) + +func (st StorageType) String() string { + switch st { + case MemoryStorage: + return "Memory" + case FileStorage: + return "File" + default: + return "Unknown Storage Type" + } +} + +func (st StorageType) MarshalJSON() ([]byte, error) { + switch st { + case MemoryStorage: + return json.Marshal(memoryStorageString) + case FileStorage: + return json.Marshal(fileStorageString) + default: + return nil, fmt.Errorf("nats: can not marshal %v", st) + } +} + +func (st *StorageType) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString(memoryStorageString): + *st = MemoryStorage + case jsonString(fileStorageString): + *st = FileStorage + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + return nil +} + +type StoreCompression uint8 + +const ( + NoCompression StoreCompression = iota + S2Compression +) + +func (alg StoreCompression) String() string { + switch alg { + case NoCompression: + return "None" + case S2Compression: + return "S2" + default: + return "Unknown StoreCompression" + } +} + +func (alg StoreCompression) MarshalJSON() ([]byte, error) { + var str string + switch alg { + case S2Compression: + str = "s2" + case NoCompression: + str = "none" + default: + return nil, fmt.Errorf("unknown compression algorithm") + } + return json.Marshal(str) +} + +func (alg *StoreCompression) UnmarshalJSON(b []byte) error { + var str string + if err := json.Unmarshal(b, &str); err != nil { + return err + } + switch str { + case "s2": + *alg = S2Compression + case "none": + *alg = NoCompression + default: + return fmt.Errorf("unknown compression algorithm") + } + return nil +} + +// Length of our hash used for named consumers. +const nameHashLen = 8 + +// Computes a hash for the given `name`. +func getHash(name string) string { + sha := sha256.New() + sha.Write([]byte(name)) + b := sha.Sum(nil) + for i := 0; i < nameHashLen; i++ { + b[i] = rdigits[int(b[i]%base)] + } + return string(b[:nameHashLen]) +} diff --git a/vendor/github.com/nats-io/nats.go/jserrors.go b/vendor/github.com/nats-io/nats.go/jserrors.go new file mode 100644 index 00000000..c8b1f5fc --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jserrors.go @@ -0,0 +1,235 @@ +// Copyright 2020-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "errors" + "fmt" +) + +var ( + // API errors + + // ErrJetStreamNotEnabled is an error returned when JetStream is not enabled for an account. + ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}} + + // ErrJetStreamNotEnabledForAccount is an error returned when JetStream is not enabled for an account. 
+ ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}}
+
+ // ErrStreamNotFound is an error returned when stream with given name does not exist.
+ ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}}
+
+ // ErrStreamNameAlreadyInUse is returned when a stream with given name already exists and has a different configuration.
+ ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}}
+
+ // ErrStreamSubjectTransformNotSupported is returned when the connected nats-server version does not support setting
+ // the stream subject transform. If this error is returned when executing AddStream(), the stream with invalid
+ // configuration was already created in the server.
+ ErrStreamSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"}
+
+ // ErrStreamSourceSubjectTransformNotSupported is returned when the connected nats-server version does not support setting
+ // the stream source subject transform. If this error is returned when executing AddStream(), the stream with invalid
+ // configuration was already created in the server.
+ ErrStreamSourceSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"}
+
+ // ErrStreamSourceNotSupported is returned when the connected nats-server version does not support setting
+ // the stream sources. If this error is returned when executing AddStream(), the stream with invalid
+ // configuration was already created in the server.
+ ErrStreamSourceNotSupported JetStreamError = &jsError{message: "stream sourcing is not supported by nats-server"}
+
+ // ErrStreamSourceMultipleSubjectTransformsNotSupported is returned when the connected nats-server version does not
+ // support setting multiple subject transforms on a stream source. If this error is returned when executing AddStream(),
+ // the stream with invalid configuration was already created in the server.
+ ErrStreamSourceMultipleSubjectTransformsNotSupported JetStreamError = &jsError{message: "stream sourcing with multiple subject transforms not supported by nats-server"}
+
+ // ErrConsumerNotFound is an error returned when consumer with given name does not exist.
+ ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}}
+
+ // ErrMsgNotFound is returned when message with provided sequence number does not exist.
+ ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}}
+
+ // ErrBadRequest is returned when invalid request is sent to JetStream API.
+ ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}}
+
+ // ErrDuplicateFilterSubjects is returned when both FilterSubject and FilterSubjects are specified when creating consumer.
+ ErrDuplicateFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeDuplicateFilterSubjects, Description: "consumer cannot have both FilterSubject and FilterSubjects specified", Code: 500}}
+
+ // ErrOverlappingFilterSubjects is returned when filter subjects overlap when creating consumer.
+ ErrOverlappingFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeOverlappingFilterSubjects, Description: "consumer subject filters cannot overlap", Code: 500}}
+
+ // ErrEmptyFilter is returned when a filter in FilterSubjects is empty.
+ ErrEmptyFilter JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerEmptyFilter, Description: "consumer filter in FilterSubjects cannot be empty", Code: 500}}
+
+ // Client errors
+
+ // ErrConsumerNameAlreadyInUse is an error returned when consumer with given name already exists.
+ ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"}
+
+ // ErrConsumerNotActive is an error returned when consumer is not active.
+ ErrConsumerNotActive JetStreamError = &jsError{message: "consumer not active"}
+
+ // ErrInvalidJSAck is returned when JetStream ack from message publish is invalid.
+ ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"}
+
+ // ErrStreamConfigRequired is returned when empty stream configuration is supplied to add/update stream.
+ ErrStreamConfigRequired JetStreamError = &jsError{message: "stream configuration is required"}
+
+ // ErrStreamNameRequired is returned when the provided stream name is empty.
+ ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"}
+
+ // ErrConsumerNameRequired is returned when the provided consumer durable name is empty.
+ ErrConsumerNameRequired JetStreamError = &jsError{message: "consumer name is required"}
+
+ // ErrConsumerMultipleFilterSubjectsNotSupported is returned when the connected nats-server version does not support setting
+ // multiple filter subjects with filter_subjects field. If this error is returned when executing AddConsumer(), the consumer with invalid
+ // configuration was already created in the server.
+ ErrConsumerMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "multiple consumer filter subjects not supported by nats-server"}
+
+ // ErrConsumerConfigRequired is returned when empty consumer configuration is supplied to add/update consumer.
+ ErrConsumerConfigRequired JetStreamError = &jsError{message: "consumer configuration is required"}
+
+ // ErrPullSubscribeToPushConsumer is returned when attempting to use PullSubscribe on push consumer.
+ ErrPullSubscribeToPushConsumer JetStreamError = &jsError{message: "cannot pull subscribe to push based consumer"}
+
+ // ErrPullSubscribeRequired is returned when attempting to use subscribe methods not suitable for pull consumers.
+ ErrPullSubscribeRequired JetStreamError = &jsError{message: "must use pull subscribe to bind to pull based consumer"}
+
+ // ErrMsgAlreadyAckd is returned when attempting to acknowledge message more than once.
+ ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"}
+
+ // ErrNoStreamResponse is returned when there is no response from stream (e.g. no responders error).
+ ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"}
+
+ // ErrNotJSMessage is returned when attempting to get metadata from a non-JetStream message.
+ ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"}
+
+ // ErrInvalidStreamName is returned when the provided stream name is invalid (contains '.' or ' ').
+ ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"}
+
+ // ErrInvalidConsumerName is returned when the provided consumer name is invalid (contains '.' or ' ').
+ ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"}
+
+ // ErrNoMatchingStream is returned when stream lookup by subject is unsuccessful.
+ ErrNoMatchingStream JetStreamError = &jsError{message: "no stream matches subject"}
+
+ // ErrSubjectMismatch is returned when the provided subject does not match consumer's filter subject.
+ ErrSubjectMismatch JetStreamError = &jsError{message: "subject does not match consumer"}
+
+ // ErrContextAndTimeout is returned when attempting to use both context and timeout.
+ ErrContextAndTimeout JetStreamError = &jsError{message: "context and timeout can not both be set"}
+
+ // ErrCantAckIfConsumerAckNone is returned when attempting to ack a message for consumer with AckNone policy set.
+ ErrCantAckIfConsumerAckNone JetStreamError = &jsError{message: "cannot acknowledge a message for a consumer with AckNone policy"}
+
+ // ErrConsumerDeleted is returned when attempting to send pull request to a consumer which does not exist.
+ ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"}
+
+ // ErrConsumerLeadershipChanged is returned when pending requests are no longer valid after leadership has changed.
+ ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "Leadership Changed"}
+
+ // ErrNoHeartbeat is returned when no heartbeat is received from server when sending requests with pull consumer.
+ ErrNoHeartbeat JetStreamError = &jsError{message: "no heartbeat received"}
+
+ // DEPRECATED: ErrInvalidDurableName is no longer returned and will be removed in future releases.
+ // Use ErrInvalidConsumerName instead.
+ ErrInvalidDurableName = errors.New("nats: invalid durable name")
+)
+
+// ErrorCode represents JetStream error codes returned by the API.
+type ErrorCode uint16
+
+const (
+ JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039
+ JSErrCodeJetStreamNotEnabled ErrorCode = 10076
+ JSErrCodeInsufficientResourcesErr ErrorCode = 10023
+
+ JSErrCodeStreamNotFound ErrorCode = 10059
+ JSErrCodeStreamNameInUse ErrorCode = 10058
+
+ JSErrCodeConsumerNotFound ErrorCode = 10014
+ JSErrCodeConsumerNameExists ErrorCode = 10013
+ JSErrCodeConsumerAlreadyExists ErrorCode = 10105
+ JSErrCodeDuplicateFilterSubjects ErrorCode = 10136
+ JSErrCodeOverlappingFilterSubjects ErrorCode = 10138
+ JSErrCodeConsumerEmptyFilter ErrorCode = 10139
+
+ JSErrCodeMessageNotFound ErrorCode = 10037
+
+ JSErrCodeBadRequest ErrorCode = 10003
+ JSStreamInvalidConfig ErrorCode = 10052
+
+ JSErrCodeStreamWrongLastSequence ErrorCode = 10071
+)
+
+// APIError is included in all API responses if there was an error.
+type APIError struct {
+ Code int `json:"code"`
+ ErrorCode ErrorCode `json:"err_code"`
+ Description string `json:"description,omitempty"`
+}
+
+// Error prints the JetStream API error code and description.
+func (e *APIError) Error() string {
+ return fmt.Sprintf("nats: %s", e.Description)
+}
+
+// APIError implements the JetStreamError interface.
+func (e *APIError) APIError() *APIError {
+ return e
+}
+
+// Is matches against an APIError.
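+// This lets callers match JetStream API errors with errors.Is; a hedged
+// sketch (assumes an initialized JetStreamContext js and a hypothetical
+// stream name):
+//
+//	_, err := js.StreamInfo("NO_SUCH_STREAM")
+//	if errors.Is(err, nats.ErrStreamNotFound) {
+//	    // stream does not exist
+//	}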
+func (e *APIError) Is(err error) bool { + if e == nil { + return false + } + // Extract internal APIError to match against. + var aerr *APIError + ok := errors.As(err, &aerr) + if !ok { + return ok + } + return e.ErrorCode == aerr.ErrorCode +} + +// JetStreamError is an error result that happens when using JetStream. +// In case of client-side error, `APIError()` returns nil +type JetStreamError interface { + APIError() *APIError + error +} + +type jsError struct { + apiErr *APIError + message string +} + +func (err *jsError) APIError() *APIError { + return err.apiErr +} + +func (err *jsError) Error() string { + if err.apiErr != nil && err.apiErr.Description != "" { + return err.apiErr.Error() + } + return fmt.Sprintf("nats: %s", err.message) +} + +func (err *jsError) Unwrap() error { + // Allow matching to embedded APIError in case there is one. + if err.apiErr == nil { + return nil + } + return err.apiErr +} diff --git a/vendor/github.com/nats-io/nats.go/jsm.go b/vendor/github.com/nats-io/nats.go/jsm.go new file mode 100644 index 00000000..266bf066 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jsm.go @@ -0,0 +1,1665 @@ +// Copyright 2021-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// JetStreamManager manages JetStream Streams and Consumers. +type JetStreamManager interface { + // AddStream creates a stream. + AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) + + // UpdateStream updates a stream. + UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) + + // DeleteStream deletes a stream. + DeleteStream(name string, opts ...JSOpt) error + + // StreamInfo retrieves information from a stream. + StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error) + + // PurgeStream purges a stream messages. + PurgeStream(name string, opts ...JSOpt) error + + // StreamsInfo can be used to retrieve a list of StreamInfo objects. + // DEPRECATED: Use Streams() instead. + StreamsInfo(opts ...JSOpt) <-chan *StreamInfo + + // Streams can be used to retrieve a list of StreamInfo objects. + Streams(opts ...JSOpt) <-chan *StreamInfo + + // StreamNames is used to retrieve a list of Stream names. + StreamNames(opts ...JSOpt) <-chan string + + // GetMsg retrieves a raw stream message stored in JetStream by sequence number. + // Use options nats.DirectGet() or nats.DirectGetNext() to trigger retrieval + // directly from a distributed group of servers (leader and replicas). + // The stream must have been created/updated with the AllowDirect boolean. + GetMsg(name string, seq uint64, opts ...JSOpt) (*RawStreamMsg, error) + + // GetLastMsg retrieves the last raw stream message stored in JetStream by subject. + // Use option nats.DirectGet() to trigger retrieval + // directly from a distributed group of servers (leader and replicas). + // The stream must have been created/updated with the AllowDirect boolean. 
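+//
+// Hedged sketch (assumes a JetStreamManager js and a stream "ORDERS"
+// created with AllowDirect; names are for illustration only):
+//
+//	raw, err := js.GetLastMsg("ORDERS", "ORDERS.new", nats.DirectGet())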
+ GetLastMsg(name, subject string, opts ...JSOpt) (*RawStreamMsg, error) + + // DeleteMsg deletes a message from a stream. The message is marked as erased, but its value is not overwritten. + DeleteMsg(name string, seq uint64, opts ...JSOpt) error + + // SecureDeleteMsg deletes a message from a stream. The deleted message is overwritten with random data + // As a result, this operation is slower than DeleteMsg() + SecureDeleteMsg(name string, seq uint64, opts ...JSOpt) error + + // AddConsumer adds a consumer to a stream. + AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) + + // UpdateConsumer updates an existing consumer. + UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) + + // DeleteConsumer deletes a consumer. + DeleteConsumer(stream, consumer string, opts ...JSOpt) error + + // ConsumerInfo retrieves information of a consumer from a stream. + ConsumerInfo(stream, name string, opts ...JSOpt) (*ConsumerInfo, error) + + // ConsumersInfo is used to retrieve a list of ConsumerInfo objects. + // DEPRECATED: Use Consumers() instead. + ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo + + // Consumers is used to retrieve a list of ConsumerInfo objects. + Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo + + // ConsumerNames is used to retrieve a list of Consumer names. + ConsumerNames(stream string, opts ...JSOpt) <-chan string + + // AccountInfo retrieves info about the JetStream usage from an account. + AccountInfo(opts ...JSOpt) (*AccountInfo, error) + + // StreamNameBySubject returns a stream matching given subject. + StreamNameBySubject(string, ...JSOpt) (string, error) +} + +// StreamConfig will determine the properties for a stream. +// There are sensible defaults for most. If no subjects are +// given the name will be used as the only subject. +type StreamConfig struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + Subjects []string `json:"subjects,omitempty"` + Retention RetentionPolicy `json:"retention"` + MaxConsumers int `json:"max_consumers"` + MaxMsgs int64 `json:"max_msgs"` + MaxBytes int64 `json:"max_bytes"` + Discard DiscardPolicy `json:"discard"` + DiscardNewPerSubject bool `json:"discard_new_per_subject,omitempty"` + MaxAge time.Duration `json:"max_age"` + MaxMsgsPerSubject int64 `json:"max_msgs_per_subject"` + MaxMsgSize int32 `json:"max_msg_size,omitempty"` + Storage StorageType `json:"storage"` + Replicas int `json:"num_replicas"` + NoAck bool `json:"no_ack,omitempty"` + Template string `json:"template_owner,omitempty"` + Duplicates time.Duration `json:"duplicate_window,omitempty"` + Placement *Placement `json:"placement,omitempty"` + Mirror *StreamSource `json:"mirror,omitempty"` + Sources []*StreamSource `json:"sources,omitempty"` + Sealed bool `json:"sealed,omitempty"` + DenyDelete bool `json:"deny_delete,omitempty"` + DenyPurge bool `json:"deny_purge,omitempty"` + AllowRollup bool `json:"allow_rollup_hdrs,omitempty"` + Compression StoreCompression `json:"compression"` + FirstSeq uint64 `json:"first_seq,omitempty"` + + // Allow applying a subject transform to incoming messages before doing anything else. + SubjectTransform *SubjectTransformConfig `json:"subject_transform,omitempty"` + + // Allow republish of the message after being sequenced and stored. + RePublish *RePublish `json:"republish,omitempty"` + + // Allow higher performance, direct access to get individual messages. E.g. 
KeyValue
+	AllowDirect bool `json:"allow_direct"`
+	// Allow higher performance and unified direct access for mirrors as well.
+	MirrorDirect bool `json:"mirror_direct"`
+
+	// Limits for consumers on this stream.
+	ConsumerLimits StreamConsumerLimits `json:"consumer_limits,omitempty"`
+
+	// Metadata is additional metadata for the Stream.
+	// Keys starting with `_nats` are reserved.
+	// NOTE: Metadata requires nats-server v2.10.0+
+	Metadata map[string]string `json:"metadata,omitempty"`
+}
+
+// SubjectTransformConfig is for applying a subject transform (to matching messages) before doing anything else when a new message is received.
+type SubjectTransformConfig struct {
+	Source      string `json:"src,omitempty"`
+	Destination string `json:"dest"`
+}
+
+// RePublish is for republishing messages once committed to a stream. The original
+// subject is remapped from the subject pattern to the destination pattern.
+type RePublish struct {
+	Source      string `json:"src,omitempty"`
+	Destination string `json:"dest"`
+	HeadersOnly bool   `json:"headers_only,omitempty"`
+}
+
+// Placement is used to guide placement of streams in clustered JetStream.
+type Placement struct {
+	Cluster string   `json:"cluster"`
+	Tags    []string `json:"tags,omitempty"`
+}
+
+// StreamSource dictates how streams can source from other streams.
+type StreamSource struct {
+	Name              string                   `json:"name"`
+	OptStartSeq       uint64                   `json:"opt_start_seq,omitempty"`
+	OptStartTime      *time.Time               `json:"opt_start_time,omitempty"`
+	FilterSubject     string                   `json:"filter_subject,omitempty"`
+	SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"`
+	External          *ExternalStream          `json:"external,omitempty"`
+	Domain            string                   `json:"-"`
+}
+
+// ExternalStream allows you to qualify access to a stream source in another
+// account.
+type ExternalStream struct {
+	APIPrefix     string `json:"api"`
+	DeliverPrefix string `json:"deliver,omitempty"`
+}
+
+// StreamConsumerLimits are the limits for a consumer on a stream.
+// These can be overridden on a per consumer basis.
+type StreamConsumerLimits struct {
+	InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
+	MaxAckPending     int           `json:"max_ack_pending,omitempty"`
+}
+
+// Helper for copying when we do not want to change the user's version.
+func (ss *StreamSource) copy() *StreamSource {
+	nss := *ss
+	// Check pointers
+	if ss.OptStartTime != nil {
+		t := *ss.OptStartTime
+		nss.OptStartTime = &t
+	}
+	if ss.External != nil {
+		ext := *ss.External
+		nss.External = &ext
+	}
+	return &nss
+}
+
+// If we have a Domain, convert to the appropriate ext.APIPrefix.
+// This will change the stream source, so a copy should be passed in.
+func (ss *StreamSource) convertDomain() error {
+	if ss.Domain == _EMPTY_ {
+		return nil
+	}
+	if ss.External != nil {
+		// These should be mutually exclusive.
+		// TODO(dlc) - Make generic?
+		return errors.New("nats: domain and external are both set")
+	}
+	ss.External = &ExternalStream{APIPrefix: fmt.Sprintf(jsExtDomainT, ss.Domain)}
+	return nil
+}
+
+// apiResponse is a standard response from the JetStream JSON API.
+type apiResponse struct {
+	Type  string    `json:"type"`
+	Error *APIError `json:"error,omitempty"`
+}
+
+// apiPaged includes variables used to create paged responses from the JSON API.
+type apiPaged struct {
+	Total  int `json:"total"`
+	Offset int `json:"offset"`
+	Limit  int `json:"limit"`
+}
+
+// apiPagedRequest includes parameters allowing specific pages to be requested
+// from APIs responding with apiPaged.
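+//
+// Paging is an internal concern; callers consume complete, de-paged results
+// through the channel-based listers. Illustrative sketch:
+//
+//	for name := range js.StreamNames() {
+//		fmt.Println(name) // one entry per stream, across all pages
+//	}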
+type apiPagedRequest struct { + Offset int `json:"offset,omitempty"` +} + +// AccountInfo contains info about the JetStream usage from the current account. +type AccountInfo struct { + Tier + Domain string `json:"domain"` + API APIStats `json:"api"` + Tiers map[string]Tier `json:"tiers"` +} + +type Tier struct { + Memory uint64 `json:"memory"` + Store uint64 `json:"storage"` + Streams int `json:"streams"` + Consumers int `json:"consumers"` + Limits AccountLimits `json:"limits"` +} + +// APIStats reports on API calls to JetStream for this account. +type APIStats struct { + Total uint64 `json:"total"` + Errors uint64 `json:"errors"` +} + +// AccountLimits includes the JetStream limits of the current account. +type AccountLimits struct { + MaxMemory int64 `json:"max_memory"` + MaxStore int64 `json:"max_storage"` + MaxStreams int `json:"max_streams"` + MaxConsumers int `json:"max_consumers"` + MaxAckPending int `json:"max_ack_pending"` + MemoryMaxStreamBytes int64 `json:"memory_max_stream_bytes"` + StoreMaxStreamBytes int64 `json:"storage_max_stream_bytes"` + MaxBytesRequired bool `json:"max_bytes_required"` +} + +type accountInfoResponse struct { + apiResponse + AccountInfo +} + +// AccountInfo retrieves info about the JetStream usage from the current account. +// If JetStream is not enabled, this will return ErrJetStreamNotEnabled +// Other errors can happen but are generally considered retryable +func (js *js) AccountInfo(opts ...JSOpt) (*AccountInfo, error) { + o, cancel, err := getJSContextOpts(js.opts, opts...) + if err != nil { + return nil, err + } + if cancel != nil { + defer cancel() + } + + resp, err := js.apiRequestWithContext(o.ctx, js.apiSubj(apiAccountInfo), nil) + if err != nil { + // todo maybe nats server should never have no responder on this subject and always respond if they know there is no js to be had + if err == ErrNoResponders { + err = ErrJetStreamNotEnabled + } + return nil, err + } + var info accountInfoResponse + if err := json.Unmarshal(resp.Data, &info); err != nil { + return nil, err + } + if info.Error != nil { + // Internally checks based on error code instead of description match. + if errors.Is(info.Error, ErrJetStreamNotEnabledForAccount) { + return nil, ErrJetStreamNotEnabledForAccount + } + return nil, info.Error + } + + return &info.AccountInfo, nil +} + +type createConsumerRequest struct { + Stream string `json:"stream_name"` + Config *ConsumerConfig `json:"config"` +} + +type consumerResponse struct { + apiResponse + *ConsumerInfo +} + +// AddConsumer will add a JetStream consumer. +func (js *js) AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { + if cfg == nil { + cfg = &ConsumerConfig{} + } + consumerName := cfg.Name + if consumerName == _EMPTY_ { + consumerName = cfg.Durable + } + if consumerName != _EMPTY_ { + consInfo, err := js.ConsumerInfo(stream, consumerName, opts...) + if err != nil && !errors.Is(err, ErrConsumerNotFound) && !errors.Is(err, ErrStreamNotFound) { + return nil, err + } + + if consInfo != nil { + sameConfig := checkConfig(&consInfo.Config, cfg) + if sameConfig != nil { + return nil, fmt.Errorf("%w: creating consumer %q on stream %q", ErrConsumerNameAlreadyInUse, consumerName, stream) + } else { + return consInfo, nil + } + } + } + + return js.upsertConsumer(stream, consumerName, cfg, opts...) 
+} + +func (js *js) UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { + if cfg == nil { + return nil, ErrConsumerConfigRequired + } + consumerName := cfg.Name + if consumerName == _EMPTY_ { + consumerName = cfg.Durable + } + if consumerName == _EMPTY_ { + return nil, ErrConsumerNameRequired + } + return js.upsertConsumer(stream, consumerName, cfg, opts...) +} + +func (js *js) upsertConsumer(stream, consumerName string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { + if err := checkStreamName(stream); err != nil { + return nil, err + } + o, cancel, err := getJSContextOpts(js.opts, opts...) + if err != nil { + return nil, err + } + if cancel != nil { + defer cancel() + } + + req, err := json.Marshal(&createConsumerRequest{Stream: stream, Config: cfg}) + if err != nil { + return nil, err + } + + var ccSubj string + if consumerName == _EMPTY_ { + // if consumer name is empty (neither Durable nor Name is set), use the legacy ephemeral endpoint + ccSubj = fmt.Sprintf(apiLegacyConsumerCreateT, stream) + } else if err := checkConsumerName(consumerName); err != nil { + return nil, err + } else if js.nc.serverMinVersion(2, 9, 0) { + if cfg.Durable != "" && js.opts.featureFlags.useDurableConsumerCreate { + // if user set the useDurableConsumerCreate flag, use the legacy DURABLE.CREATE endpoint + ccSubj = fmt.Sprintf(apiDurableCreateT, stream, consumerName) + } else if cfg.FilterSubject == _EMPTY_ || cfg.FilterSubject == ">" { + // if filter subject is empty or ">", use the endpoint without filter subject + ccSubj = fmt.Sprintf(apiConsumerCreateT, stream, consumerName) + } else { + // if filter subject is not empty, use the endpoint with filter subject + ccSubj = fmt.Sprintf(apiConsumerCreateWithFilterSubjectT, stream, consumerName, cfg.FilterSubject) + } + } else { + if cfg.Durable != "" { + // if Durable is set, use the DURABLE.CREATE endpoint + ccSubj = fmt.Sprintf(apiDurableCreateT, stream, consumerName) + } else { + // if Durable is not set, use the legacy ephemeral endpoint + ccSubj = fmt.Sprintf(apiLegacyConsumerCreateT, stream) + } + } + + resp, err := js.apiRequestWithContext(o.ctx, js.apiSubj(ccSubj), req) + if err != nil { + if err == ErrNoResponders { + err = ErrJetStreamNotEnabled + } + return nil, err + } + var info consumerResponse + err = json.Unmarshal(resp.Data, &info) + if err != nil { + return nil, err + } + if info.Error != nil { + if errors.Is(info.Error, ErrStreamNotFound) { + return nil, ErrStreamNotFound + } + if errors.Is(info.Error, ErrConsumerNotFound) { + return nil, ErrConsumerNotFound + } + return nil, info.Error + } + + // check whether multiple filter subjects (if used) are reflected in the returned ConsumerInfo + if len(cfg.FilterSubjects) != 0 && len(info.Config.FilterSubjects) == 0 { + return nil, ErrConsumerMultipleFilterSubjectsNotSupported + } + return info.ConsumerInfo, nil +} + +// consumerDeleteResponse is the response for a Consumer delete request. +type consumerDeleteResponse struct { + apiResponse + Success bool `json:"success,omitempty"` +} + +func checkStreamName(stream string) error { + if stream == _EMPTY_ { + return ErrStreamNameRequired + } + if strings.ContainsAny(stream, ". ") { + return ErrInvalidStreamName + } + return nil +} + +// Check that the consumer name is not empty and is valid (does not contain "." and " "). +// Additional consumer name validation is done in nats-server. 
+// Returns ErrConsumerNameRequired if the consumer name is empty, ErrInvalidConsumerName if invalid, otherwise nil.
+func checkConsumerName(consumer string) error {
+	if consumer == _EMPTY_ {
+		return ErrConsumerNameRequired
+	}
+	if strings.ContainsAny(consumer, ". ") {
+		return ErrInvalidConsumerName
+	}
+	return nil
+}
+
+// DeleteConsumer deletes a Consumer.
+func (js *js) DeleteConsumer(stream, consumer string, opts ...JSOpt) error {
+	if err := checkStreamName(stream); err != nil {
+		return err
+	}
+	if err := checkConsumerName(consumer); err != nil {
+		return err
+	}
+	o, cancel, err := getJSContextOpts(js.opts, opts...)
+	if err != nil {
+		return err
+	}
+	if cancel != nil {
+		defer cancel()
+	}
+
+	dcSubj := js.apiSubj(fmt.Sprintf(apiConsumerDeleteT, stream, consumer))
+	r, err := js.apiRequestWithContext(o.ctx, dcSubj, nil)
+	if err != nil {
+		return err
+	}
+	var resp consumerDeleteResponse
+	if err := json.Unmarshal(r.Data, &resp); err != nil {
+		return err
+	}
+
+	if resp.Error != nil {
+		if errors.Is(resp.Error, ErrConsumerNotFound) {
+			return ErrConsumerNotFound
+		}
+		return resp.Error
+	}
+	return nil
+}
+
+// ConsumerInfo returns information about a Consumer.
+func (js *js) ConsumerInfo(stream, consumer string, opts ...JSOpt) (*ConsumerInfo, error) {
+	if err := checkStreamName(stream); err != nil {
+		return nil, err
+	}
+	if err := checkConsumerName(consumer); err != nil {
+		return nil, err
+	}
+	o, cancel, err := getJSContextOpts(js.opts, opts...)
+	if err != nil {
+		return nil, err
+	}
+	if cancel != nil {
+		defer cancel()
+	}
+	return js.getConsumerInfoContext(o.ctx, stream, consumer)
+}
+
+// consumerLister fetches pages of ConsumerInfo objects. This object is not
+// safe to use for multiple threads.
+type consumerLister struct {
+	stream string
+	js     *js
+
+	err      error
+	offset   int
+	page     []*ConsumerInfo
+	pageInfo *apiPaged
+}
+
+// consumersRequest is the type used for Consumers requests.
+type consumersRequest struct {
+	apiPagedRequest
+}
+
+// consumerListResponse is the response for a Consumers List request.
+type consumerListResponse struct {
+	apiResponse
+	apiPaged
+	Consumers []*ConsumerInfo `json:"consumers"`
+}
+
+// Next fetches the next ConsumerInfo page.
+func (c *consumerLister) Next() bool {
+	if c.err != nil {
+		return false
+	}
+	if err := checkStreamName(c.stream); err != nil {
+		c.err = err
+		return false
+	}
+	if c.pageInfo != nil && c.offset >= c.pageInfo.Total {
+		return false
+	}
+
+	req, err := json.Marshal(consumersRequest{
+		apiPagedRequest: apiPagedRequest{Offset: c.offset},
+	})
+	if err != nil {
+		c.err = err
+		return false
+	}
+
+	var cancel context.CancelFunc
+	ctx := c.js.opts.ctx
+	if ctx == nil {
+		ctx, cancel = context.WithTimeout(context.Background(), c.js.opts.wait)
+		defer cancel()
+	}
+
+	clSubj := c.js.apiSubj(fmt.Sprintf(apiConsumerListT, c.stream))
+	r, err := c.js.apiRequestWithContext(ctx, clSubj, req)
+	if err != nil {
+		c.err = err
+		return false
+	}
+	var resp consumerListResponse
+	if err := json.Unmarshal(r.Data, &resp); err != nil {
+		c.err = err
+		return false
+	}
+	if resp.Error != nil {
+		c.err = resp.Error
+		return false
+	}
+
+	c.pageInfo = &resp.apiPaged
+	c.page = resp.Consumers
+	c.offset += len(c.page)
+	return true
+}
+
+// Page returns the current ConsumerInfo page.
+func (c *consumerLister) Page() []*ConsumerInfo {
+	return c.page
+}
+
+// Err returns any errors found while fetching pages.
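+//
+// The lister itself is internal; a typical caller ranges over the public
+// channel-based API instead (illustrative, stream name assumed):
+//
+//	for ci := range js.Consumers("ORDERS") {
+//		fmt.Println(ci.Name)
+//	}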
+func (c *consumerLister) Err() error { + return c.err +} + +// Consumers is used to retrieve a list of ConsumerInfo objects. +func (jsc *js) Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo { + o, cancel, err := getJSContextOpts(jsc.opts, opts...) + if err != nil { + return nil + } + + ch := make(chan *ConsumerInfo) + l := &consumerLister{js: &js{nc: jsc.nc, opts: o}, stream: stream} + go func() { + if cancel != nil { + defer cancel() + } + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + select { + case ch <- info: + case <-o.ctx.Done(): + return + } + } + } + }() + + return ch +} + +// ConsumersInfo is used to retrieve a list of ConsumerInfo objects. +// DEPRECATED: Use Consumers() instead. +func (jsc *js) ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo { + return jsc.Consumers(stream, opts...) +} + +type consumerNamesLister struct { + stream string + js *js + + err error + offset int + page []string + pageInfo *apiPaged +} + +// consumerNamesListResponse is the response for a Consumers Names List request. +type consumerNamesListResponse struct { + apiResponse + apiPaged + Consumers []string `json:"consumers"` +} + +// Next fetches the next consumer names page. +func (c *consumerNamesLister) Next() bool { + if c.err != nil { + return false + } + if err := checkStreamName(c.stream); err != nil { + c.err = err + return false + } + if c.pageInfo != nil && c.offset >= c.pageInfo.Total { + return false + } + + var cancel context.CancelFunc + ctx := c.js.opts.ctx + if ctx == nil { + ctx, cancel = context.WithTimeout(context.Background(), c.js.opts.wait) + defer cancel() + } + + req, err := json.Marshal(consumersRequest{ + apiPagedRequest: apiPagedRequest{Offset: c.offset}, + }) + if err != nil { + c.err = err + return false + } + clSubj := c.js.apiSubj(fmt.Sprintf(apiConsumerNamesT, c.stream)) + r, err := c.js.apiRequestWithContext(ctx, clSubj, req) + if err != nil { + c.err = err + return false + } + var resp consumerNamesListResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + c.err = err + return false + } + if resp.Error != nil { + c.err = resp.Error + return false + } + + c.pageInfo = &resp.apiPaged + c.page = resp.Consumers + c.offset += len(c.page) + return true +} + +// Page returns the current ConsumerInfo page. +func (c *consumerNamesLister) Page() []string { + return c.page +} + +// Err returns any errors found while fetching pages. +func (c *consumerNamesLister) Err() error { + return c.err +} + +// ConsumerNames is used to retrieve a list of Consumer names. +func (jsc *js) ConsumerNames(stream string, opts ...JSOpt) <-chan string { + o, cancel, err := getJSContextOpts(jsc.opts, opts...) + if err != nil { + return nil + } + + ch := make(chan string) + l := &consumerNamesLister{stream: stream, js: &js{nc: jsc.nc, opts: o}} + go func() { + if cancel != nil { + defer cancel() + } + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + select { + case ch <- info: + case <-o.ctx.Done(): + return + } + } + } + }() + + return ch +} + +// streamCreateResponse stream creation. +type streamCreateResponse struct { + apiResponse + *StreamInfo +} + +func (js *js) AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { + if cfg == nil { + return nil, ErrStreamConfigRequired + } + if err := checkStreamName(cfg.Name); err != nil { + return nil, err + } + o, cancel, err := getJSContextOpts(js.opts, opts...) 
+ if err != nil { + return nil, err + } + if cancel != nil { + defer cancel() + } + + // In case we need to change anything, copy so we do not change the caller's version. + ncfg := *cfg + + // If we have a mirror and an external domain, convert to ext.APIPrefix. + if cfg.Mirror != nil && cfg.Mirror.Domain != _EMPTY_ { + // Copy so we do not change the caller's version. + ncfg.Mirror = ncfg.Mirror.copy() + if err := ncfg.Mirror.convertDomain(); err != nil { + return nil, err + } + } + // Check sources for the same. + if len(ncfg.Sources) > 0 { + ncfg.Sources = append([]*StreamSource(nil), ncfg.Sources...) + for i, ss := range ncfg.Sources { + if ss.Domain != _EMPTY_ { + ncfg.Sources[i] = ss.copy() + if err := ncfg.Sources[i].convertDomain(); err != nil { + return nil, err + } + } + } + } + + req, err := json.Marshal(&ncfg) + if err != nil { + return nil, err + } + + csSubj := js.apiSubj(fmt.Sprintf(apiStreamCreateT, cfg.Name)) + r, err := js.apiRequestWithContext(o.ctx, csSubj, req) + if err != nil { + return nil, err + } + var resp streamCreateResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return nil, err + } + if resp.Error != nil { + if errors.Is(resp.Error, ErrStreamNameAlreadyInUse) { + return nil, ErrStreamNameAlreadyInUse + } + return nil, resp.Error + } + + // check that input subject transform (if used) is reflected in the returned ConsumerInfo + if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { + return nil, ErrStreamSubjectTransformNotSupported + } + if len(cfg.Sources) != 0 { + if len(cfg.Sources) != len(resp.Config.Sources) { + return nil, ErrStreamSourceNotSupported + } + for i := range cfg.Sources { + if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { + return nil, ErrStreamSourceMultipleSubjectTransformsNotSupported + } + } + } + + return resp.StreamInfo, nil +} + +type ( + // StreamInfoRequest contains additional option to return + StreamInfoRequest struct { + apiPagedRequest + // DeletedDetails when true includes information about deleted messages + DeletedDetails bool `json:"deleted_details,omitempty"` + // SubjectsFilter when set, returns information on the matched subjects + SubjectsFilter string `json:"subjects_filter,omitempty"` + } + streamInfoResponse = struct { + apiResponse + apiPaged + *StreamInfo + } +) + +func (js *js) StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error) { + if err := checkStreamName(stream); err != nil { + return nil, err + } + o, cancel, err := getJSContextOpts(js.opts, opts...) 
+ if err != nil { + return nil, err + } + if cancel != nil { + defer cancel() + } + + var i int + var subjectMessagesMap map[string]uint64 + var req []byte + var requestPayload bool + + var siOpts StreamInfoRequest + if o.streamInfoOpts != nil { + requestPayload = true + siOpts = *o.streamInfoOpts + } + + for { + if requestPayload { + siOpts.Offset = i + if req, err = json.Marshal(&siOpts); err != nil { + return nil, err + } + } + + siSubj := js.apiSubj(fmt.Sprintf(apiStreamInfoT, stream)) + + r, err := js.apiRequestWithContext(o.ctx, siSubj, req) + if err != nil { + return nil, err + } + + var resp streamInfoResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return nil, err + } + + if resp.Error != nil { + if errors.Is(resp.Error, ErrStreamNotFound) { + return nil, ErrStreamNotFound + } + return nil, resp.Error + } + + var total int + // for backwards compatibility + if resp.Total != 0 { + total = resp.Total + } else { + total = len(resp.State.Subjects) + } + + if requestPayload && len(resp.StreamInfo.State.Subjects) > 0 { + if subjectMessagesMap == nil { + subjectMessagesMap = make(map[string]uint64, total) + } + + for k, j := range resp.State.Subjects { + subjectMessagesMap[k] = j + i++ + } + } + + if i >= total { + if requestPayload { + resp.StreamInfo.State.Subjects = subjectMessagesMap + } + return resp.StreamInfo, nil + } + } +} + +// StreamInfo shows config and current state for this stream. +type StreamInfo struct { + Config StreamConfig `json:"config"` + Created time.Time `json:"created"` + State StreamState `json:"state"` + Cluster *ClusterInfo `json:"cluster,omitempty"` + Mirror *StreamSourceInfo `json:"mirror,omitempty"` + Sources []*StreamSourceInfo `json:"sources,omitempty"` + Alternates []*StreamAlternate `json:"alternates,omitempty"` +} + +// StreamAlternate is an alternate stream represented by a mirror. +type StreamAlternate struct { + Name string `json:"name"` + Domain string `json:"domain,omitempty"` + Cluster string `json:"cluster"` +} + +// StreamSourceInfo shows information about an upstream stream source. +type StreamSourceInfo struct { + Name string `json:"name"` + Lag uint64 `json:"lag"` + Active time.Duration `json:"active"` + External *ExternalStream `json:"external"` + Error *APIError `json:"error"` + FilterSubject string `json:"filter_subject,omitempty"` + SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"` +} + +// StreamState is information about the given stream. +type StreamState struct { + Msgs uint64 `json:"messages"` + Bytes uint64 `json:"bytes"` + FirstSeq uint64 `json:"first_seq"` + FirstTime time.Time `json:"first_ts"` + LastSeq uint64 `json:"last_seq"` + LastTime time.Time `json:"last_ts"` + Consumers int `json:"consumer_count"` + Deleted []uint64 `json:"deleted"` + NumDeleted int `json:"num_deleted"` + NumSubjects uint64 `json:"num_subjects"` + Subjects map[string]uint64 `json:"subjects"` +} + +// ClusterInfo shows information about the underlying set of servers +// that make up the stream or consumer. +type ClusterInfo struct { + Name string `json:"name,omitempty"` + Leader string `json:"leader,omitempty"` + Replicas []*PeerInfo `json:"replicas,omitempty"` +} + +// PeerInfo shows information about all the peers in the cluster that +// are supporting the stream or consumer. 
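+//
+// Illustrative sketch of inspecting placement via StreamInfo (assumes a
+// clustered deployment and a stream named "ORDERS"):
+//
+//	si, _ := js.StreamInfo("ORDERS")
+//	if si.Cluster != nil {
+//		for _, p := range si.Cluster.Replicas {
+//			fmt.Println(p.Name, p.Current, p.Lag)
+//		}
+//	}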
+type PeerInfo struct { + Name string `json:"name"` + Current bool `json:"current"` + Offline bool `json:"offline,omitempty"` + Active time.Duration `json:"active"` + Lag uint64 `json:"lag,omitempty"` +} + +// UpdateStream updates a Stream. +func (js *js) UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { + if cfg == nil { + return nil, ErrStreamConfigRequired + } + if err := checkStreamName(cfg.Name); err != nil { + return nil, err + } + o, cancel, err := getJSContextOpts(js.opts, opts...) + if err != nil { + return nil, err + } + if cancel != nil { + defer cancel() + } + + req, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + usSubj := js.apiSubj(fmt.Sprintf(apiStreamUpdateT, cfg.Name)) + r, err := js.apiRequestWithContext(o.ctx, usSubj, req) + if err != nil { + return nil, err + } + var resp streamInfoResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return nil, err + } + if resp.Error != nil { + if errors.Is(resp.Error, ErrStreamNotFound) { + return nil, ErrStreamNotFound + } + return nil, resp.Error + } + + // check that input subject transform (if used) is reflected in the returned StreamInfo + if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { + return nil, ErrStreamSubjectTransformNotSupported + } + + if len(cfg.Sources) != 0 { + if len(cfg.Sources) != len(resp.Config.Sources) { + return nil, ErrStreamSourceNotSupported + } + for i := range cfg.Sources { + if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { + return nil, ErrStreamSourceMultipleSubjectTransformsNotSupported + } + } + } + + return resp.StreamInfo, nil +} + +// streamDeleteResponse is the response for a Stream delete request. +type streamDeleteResponse struct { + apiResponse + Success bool `json:"success,omitempty"` +} + +// DeleteStream deletes a Stream. +func (js *js) DeleteStream(name string, opts ...JSOpt) error { + if err := checkStreamName(name); err != nil { + return err + } + o, cancel, err := getJSContextOpts(js.opts, opts...) + if err != nil { + return err + } + if cancel != nil { + defer cancel() + } + + dsSubj := js.apiSubj(fmt.Sprintf(apiStreamDeleteT, name)) + r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil) + if err != nil { + return err + } + var resp streamDeleteResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return err + } + + if resp.Error != nil { + if errors.Is(resp.Error, ErrStreamNotFound) { + return ErrStreamNotFound + } + return resp.Error + } + return nil +} + +type apiMsgGetRequest struct { + Seq uint64 `json:"seq,omitempty"` + LastFor string `json:"last_by_subj,omitempty"` + NextFor string `json:"next_by_subj,omitempty"` +} + +// RawStreamMsg is a raw message stored in JetStream. +type RawStreamMsg struct { + Subject string + Sequence uint64 + Header Header + Data []byte + Time time.Time +} + +// storedMsg is a raw message stored in JetStream. +type storedMsg struct { + Subject string `json:"subject"` + Sequence uint64 `json:"seq"` + Header []byte `json:"hdrs,omitempty"` + Data []byte `json:"data,omitempty"` + Time time.Time `json:"time"` +} + +// apiMsgGetResponse is the response for a Stream get request. +type apiMsgGetResponse struct { + apiResponse + Message *storedMsg `json:"message,omitempty"` +} + +// GetLastMsg retrieves the last raw stream message stored in JetStream by subject. 
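+//
+// Illustrative sketch pairing subject-based and sequence-based lookups
+// (stream and subject names assumed):
+//
+//	last, err := js.GetLastMsg("ORDERS", "ORDERS.created")
+//	if err == nil {
+//		same, _ := js.GetMsg("ORDERS", last.Sequence) // same stored message
+//		_ = same
+//	}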
+func (js *js) GetLastMsg(name, subject string, opts ...JSOpt) (*RawStreamMsg, error) { + return js.getMsg(name, &apiMsgGetRequest{LastFor: subject}, opts...) +} + +// GetMsg retrieves a raw stream message stored in JetStream by sequence number. +func (js *js) GetMsg(name string, seq uint64, opts ...JSOpt) (*RawStreamMsg, error) { + return js.getMsg(name, &apiMsgGetRequest{Seq: seq}, opts...) +} + +// Low level getMsg +func (js *js) getMsg(name string, mreq *apiMsgGetRequest, opts ...JSOpt) (*RawStreamMsg, error) { + o, cancel, err := getJSContextOpts(js.opts, opts...) + if err != nil { + return nil, err + } + if cancel != nil { + defer cancel() + } + + if err := checkStreamName(name); err != nil { + return nil, err + } + + var apiSubj string + if o.directGet && mreq.LastFor != _EMPTY_ { + apiSubj = apiDirectMsgGetLastBySubjectT + dsSubj := js.apiSubj(fmt.Sprintf(apiSubj, name, mreq.LastFor)) + r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil) + if err != nil { + return nil, err + } + return convertDirectGetMsgResponseToMsg(name, r) + } + + if o.directGet { + apiSubj = apiDirectMsgGetT + mreq.NextFor = o.directNextFor + } else { + apiSubj = apiMsgGetT + } + + req, err := json.Marshal(mreq) + if err != nil { + return nil, err + } + + dsSubj := js.apiSubj(fmt.Sprintf(apiSubj, name)) + r, err := js.apiRequestWithContext(o.ctx, dsSubj, req) + if err != nil { + return nil, err + } + + if o.directGet { + return convertDirectGetMsgResponseToMsg(name, r) + } + + var resp apiMsgGetResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return nil, err + } + if resp.Error != nil { + if errors.Is(resp.Error, ErrMsgNotFound) { + return nil, ErrMsgNotFound + } + if errors.Is(resp.Error, ErrStreamNotFound) { + return nil, ErrStreamNotFound + } + return nil, resp.Error + } + + msg := resp.Message + + var hdr Header + if len(msg.Header) > 0 { + hdr, err = DecodeHeadersMsg(msg.Header) + if err != nil { + return nil, err + } + } + + return &RawStreamMsg{ + Subject: msg.Subject, + Sequence: msg.Sequence, + Header: hdr, + Data: msg.Data, + Time: msg.Time, + }, nil +} + +func convertDirectGetMsgResponseToMsg(name string, r *Msg) (*RawStreamMsg, error) { + // Check for 404/408. We would get a no-payload message and a "Status" header + if len(r.Data) == 0 { + val := r.Header.Get(statusHdr) + if val != _EMPTY_ { + switch val { + case noMessagesSts: + return nil, ErrMsgNotFound + default: + desc := r.Header.Get(descrHdr) + if desc == _EMPTY_ { + desc = "unable to get message" + } + return nil, fmt.Errorf("nats: %s", desc) + } + } + } + // Check for headers that give us the required information to + // reconstruct the message. + if len(r.Header) == 0 { + return nil, fmt.Errorf("nats: response should have headers") + } + stream := r.Header.Get(JSStream) + if stream == _EMPTY_ { + return nil, fmt.Errorf("nats: missing stream header") + } + + // Mirrors can now answer direct gets, so removing check for name equality. + // TODO(dlc) - We could have server also have a header with origin and check that? 
+
+	seqStr := r.Header.Get(JSSequence)
+	if seqStr == _EMPTY_ {
+		return nil, fmt.Errorf("nats: missing sequence header")
+	}
+	seq, err := strconv.ParseUint(seqStr, 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("nats: invalid sequence header '%s': %v", seqStr, err)
+	}
+	timeStr := r.Header.Get(JSTimeStamp)
+	if timeStr == _EMPTY_ {
+		return nil, fmt.Errorf("nats: missing timestamp header")
+	}
+	// Temporary code: the server in main branch is sending with format
+	// "2006-01-02 15:04:05.999999999 +0000 UTC", but will be changed
+	// to use format RFC3339Nano. Because of server test deps/cycle,
+	// support both until the server PR lands.
+	tm, err := time.Parse(time.RFC3339Nano, timeStr)
+	if err != nil {
+		tm, err = time.Parse("2006-01-02 15:04:05.999999999 +0000 UTC", timeStr)
+		if err != nil {
+			return nil, fmt.Errorf("nats: invalid timestamp header '%s': %v", timeStr, err)
+		}
+	}
+	subj := r.Header.Get(JSSubject)
+	if subj == _EMPTY_ {
+		return nil, fmt.Errorf("nats: missing subject header")
+	}
+	return &RawStreamMsg{
+		Subject:  subj,
+		Sequence: seq,
+		Header:   r.Header,
+		Data:     r.Data,
+		Time:     tm,
+	}, nil
+}
+
+type msgDeleteRequest struct {
+	Seq     uint64 `json:"seq"`
+	NoErase bool   `json:"no_erase,omitempty"`
+}
+
+// msgDeleteResponse is the response for a message delete request.
+type msgDeleteResponse struct {
+	apiResponse
+	Success bool `json:"success,omitempty"`
+}
+
+// DeleteMsg deletes a message from a stream.
+// The message is marked as erased, but not overwritten.
+func (js *js) DeleteMsg(name string, seq uint64, opts ...JSOpt) error {
+	o, cancel, err := getJSContextOpts(js.opts, opts...)
+	if err != nil {
+		return err
+	}
+	if cancel != nil {
+		defer cancel()
+	}
+
+	return js.deleteMsg(o.ctx, name, &msgDeleteRequest{Seq: seq, NoErase: true})
+}
+
+// SecureDeleteMsg deletes a message from a stream. The deleted message is overwritten with random data.
+// As a result, this operation is slower than DeleteMsg().
+func (js *js) SecureDeleteMsg(name string, seq uint64, opts ...JSOpt) error {
+	o, cancel, err := getJSContextOpts(js.opts, opts...)
+	if err != nil {
+		return err
+	}
+	if cancel != nil {
+		defer cancel()
+	}
+
+	return js.deleteMsg(o.ctx, name, &msgDeleteRequest{Seq: seq})
+}
+
+func (js *js) deleteMsg(ctx context.Context, stream string, req *msgDeleteRequest) error {
+	if err := checkStreamName(stream); err != nil {
+		return err
+	}
+	reqJSON, err := json.Marshal(req)
+	if err != nil {
+		return err
+	}
+
+	dsSubj := js.apiSubj(fmt.Sprintf(apiMsgDeleteT, stream))
+	r, err := js.apiRequestWithContext(ctx, dsSubj, reqJSON)
+	if err != nil {
+		return err
+	}
+	var resp msgDeleteResponse
+	if err := json.Unmarshal(r.Data, &resp); err != nil {
+		return err
+	}
+	if resp.Error != nil {
+		return resp.Error
+	}
+	return nil
+}
+
+// StreamPurgeRequest is optional request information to the purge API.
+type StreamPurgeRequest struct {
+	// Purge up to but not including sequence.
+	Sequence uint64 `json:"seq,omitempty"`
+	// Subject to match against messages for the purge command.
+	Subject string `json:"filter,omitempty"`
+	// Number of messages to keep.
+	Keep uint64 `json:"keep,omitempty"`
+}
+
+type streamPurgeResponse struct {
+	apiResponse
+	Success bool   `json:"success,omitempty"`
+	Purged  uint64 `json:"purged"`
+}
+
+// PurgeStream purges messages on a Stream.
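+//
+// A hedged sketch of a subject-scoped purge (names illustrative); the
+// request body is passed as a JSOpt:
+//
+//	err := js.PurgeStream("ORDERS", &StreamPurgeRequest{
+//		Subject: "ORDERS.archived", // purge only this subject
+//		Keep:    10,                // but keep the 10 newest matches
+//	})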
+func (js *js) PurgeStream(stream string, opts ...JSOpt) error { + if err := checkStreamName(stream); err != nil { + return err + } + var req *StreamPurgeRequest + var ok bool + for _, opt := range opts { + // For PurgeStream, only request body opt is relevant + if req, ok = opt.(*StreamPurgeRequest); ok { + break + } + } + return js.purgeStream(stream, req) +} + +func (js *js) purgeStream(stream string, req *StreamPurgeRequest, opts ...JSOpt) error { + o, cancel, err := getJSContextOpts(js.opts, opts...) + if err != nil { + return err + } + if cancel != nil { + defer cancel() + } + + var b []byte + if req != nil { + if b, err = json.Marshal(req); err != nil { + return err + } + } + + psSubj := js.apiSubj(fmt.Sprintf(apiStreamPurgeT, stream)) + r, err := js.apiRequestWithContext(o.ctx, psSubj, b) + if err != nil { + return err + } + var resp streamPurgeResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return err + } + if resp.Error != nil { + if errors.Is(resp.Error, ErrBadRequest) { + return fmt.Errorf("%w: %s", ErrBadRequest, "invalid purge request body") + } + return resp.Error + } + return nil +} + +// streamLister fetches pages of StreamInfo objects. This object is not safe +// to use for multiple threads. +type streamLister struct { + js *js + page []*StreamInfo + err error + + offset int + pageInfo *apiPaged +} + +// streamListResponse list of detailed stream information. +// A nil request is valid and means all streams. +type streamListResponse struct { + apiResponse + apiPaged + Streams []*StreamInfo `json:"streams"` +} + +// streamNamesRequest is used for Stream Name requests. +type streamNamesRequest struct { + apiPagedRequest + // These are filters that can be applied to the list. + Subject string `json:"subject,omitempty"` +} + +// Next fetches the next StreamInfo page. +func (s *streamLister) Next() bool { + if s.err != nil { + return false + } + if s.pageInfo != nil && s.offset >= s.pageInfo.Total { + return false + } + + req, err := json.Marshal(streamNamesRequest{ + apiPagedRequest: apiPagedRequest{Offset: s.offset}, + Subject: s.js.opts.streamListSubject, + }) + if err != nil { + s.err = err + return false + } + + var cancel context.CancelFunc + ctx := s.js.opts.ctx + if ctx == nil { + ctx, cancel = context.WithTimeout(context.Background(), s.js.opts.wait) + defer cancel() + } + + slSubj := s.js.apiSubj(apiStreamListT) + r, err := s.js.apiRequestWithContext(ctx, slSubj, req) + if err != nil { + s.err = err + return false + } + var resp streamListResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + s.err = err + return false + } + if resp.Error != nil { + s.err = resp.Error + return false + } + + s.pageInfo = &resp.apiPaged + s.page = resp.Streams + s.offset += len(s.page) + return true +} + +// Page returns the current StreamInfo page. +func (s *streamLister) Page() []*StreamInfo { + return s.page +} + +// Err returns any errors found while fetching pages. +func (s *streamLister) Err() error { + return s.err +} + +// Streams can be used to retrieve a list of StreamInfo objects. +func (jsc *js) Streams(opts ...JSOpt) <-chan *StreamInfo { + o, cancel, err := getJSContextOpts(jsc.opts, opts...) 
+ if err != nil { + return nil + } + + ch := make(chan *StreamInfo) + l := &streamLister{js: &js{nc: jsc.nc, opts: o}} + go func() { + if cancel != nil { + defer cancel() + } + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + select { + case ch <- info: + case <-o.ctx.Done(): + return + } + } + } + }() + + return ch +} + +// StreamsInfo can be used to retrieve a list of StreamInfo objects. +// DEPRECATED: Use Streams() instead. +func (jsc *js) StreamsInfo(opts ...JSOpt) <-chan *StreamInfo { + return jsc.Streams(opts...) +} + +type streamNamesLister struct { + js *js + + err error + offset int + page []string + pageInfo *apiPaged +} + +// Next fetches the next stream names page. +func (l *streamNamesLister) Next() bool { + if l.err != nil { + return false + } + if l.pageInfo != nil && l.offset >= l.pageInfo.Total { + return false + } + + var cancel context.CancelFunc + ctx := l.js.opts.ctx + if ctx == nil { + ctx, cancel = context.WithTimeout(context.Background(), l.js.opts.wait) + defer cancel() + } + + req, err := json.Marshal(streamNamesRequest{ + apiPagedRequest: apiPagedRequest{Offset: l.offset}, + Subject: l.js.opts.streamListSubject, + }) + if err != nil { + l.err = err + return false + } + r, err := l.js.apiRequestWithContext(ctx, l.js.apiSubj(apiStreams), req) + if err != nil { + l.err = err + return false + } + var resp streamNamesResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + l.err = err + return false + } + if resp.Error != nil { + l.err = resp.Error + return false + } + + l.pageInfo = &resp.apiPaged + l.page = resp.Streams + l.offset += len(l.page) + return true +} + +// Page returns the current ConsumerInfo page. +func (l *streamNamesLister) Page() []string { + return l.page +} + +// Err returns any errors found while fetching pages. +func (l *streamNamesLister) Err() error { + return l.err +} + +// StreamNames is used to retrieve a list of Stream names. +func (jsc *js) StreamNames(opts ...JSOpt) <-chan string { + o, cancel, err := getJSContextOpts(jsc.opts, opts...) + if err != nil { + return nil + } + + ch := make(chan string) + l := &streamNamesLister{js: &js{nc: jsc.nc, opts: o}} + go func() { + if cancel != nil { + defer cancel() + } + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + select { + case ch <- info: + case <-o.ctx.Done(): + return + } + } + } + }() + + return ch +} + +// StreamNameBySubject returns a stream name that matches the subject. +func (jsc *js) StreamNameBySubject(subj string, opts ...JSOpt) (string, error) { + o, cancel, err := getJSContextOpts(jsc.opts, opts...) + if err != nil { + return "", err + } + if cancel != nil { + defer cancel() + } + + var slr streamNamesResponse + req := &streamRequest{subj} + j, err := json.Marshal(req) + if err != nil { + return _EMPTY_, err + } + + resp, err := jsc.apiRequestWithContext(o.ctx, jsc.apiSubj(apiStreams), j) + if err != nil { + if err == ErrNoResponders { + err = ErrJetStreamNotEnabled + } + return _EMPTY_, err + } + if err := json.Unmarshal(resp.Data, &slr); err != nil { + return _EMPTY_, err + } + + if slr.Error != nil || len(slr.Streams) != 1 { + return _EMPTY_, ErrNoMatchingStream + } + return slr.Streams[0], nil +} + +func getJSContextOpts(defs *jsOpts, opts ...JSOpt) (*jsOpts, context.CancelFunc, error) { + var o jsOpts + for _, opt := range opts { + if err := opt.configureJSContext(&o); err != nil { + return nil, nil, err + } + } + + // Check for option collisions. Right now just timeout and context. 
+ if o.ctx != nil && o.wait != 0 { + return nil, nil, ErrContextAndTimeout + } + if o.wait == 0 && o.ctx == nil { + o.wait = defs.wait + } + var cancel context.CancelFunc + if o.ctx == nil && o.wait > 0 { + o.ctx, cancel = context.WithTimeout(context.Background(), o.wait) + } + if o.pre == _EMPTY_ { + o.pre = defs.pre + } + + return &o, cancel, nil +} diff --git a/vendor/github.com/nats-io/nats.go/kv.go b/vendor/github.com/nats-io/nats.go/kv.go new file mode 100644 index 00000000..7382f4d8 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/kv.go @@ -0,0 +1,1119 @@ +// Copyright 2021-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "context" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/nats-io/nats.go/internal/parser" +) + +// KeyValueManager is used to manage KeyValue stores. +type KeyValueManager interface { + // KeyValue will lookup and bind to an existing KeyValue store. + KeyValue(bucket string) (KeyValue, error) + // CreateKeyValue will create a KeyValue store with the following configuration. + CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) + // DeleteKeyValue will delete this KeyValue store (JetStream stream). + DeleteKeyValue(bucket string) error + // KeyValueStoreNames is used to retrieve a list of key value store names + KeyValueStoreNames() <-chan string + // KeyValueStores is used to retrieve a list of key value store statuses + KeyValueStores() <-chan KeyValueStatus +} + +// KeyValue contains methods to operate on a KeyValue store. +type KeyValue interface { + // Get returns the latest value for the key. + Get(key string) (entry KeyValueEntry, err error) + // GetRevision returns a specific revision value for the key. + GetRevision(key string, revision uint64) (entry KeyValueEntry, err error) + // Put will place the new value for the key into the store. + Put(key string, value []byte) (revision uint64, err error) + // PutString will place the string for the key into the store. + PutString(key string, value string) (revision uint64, err error) + // Create will add the key/value pair iff it does not exist. + Create(key string, value []byte) (revision uint64, err error) + // Update will update the value iff the latest revision matches. + Update(key string, value []byte, last uint64) (revision uint64, err error) + // Delete will place a delete marker and leave all revisions. + Delete(key string, opts ...DeleteOpt) error + // Purge will place a delete marker and remove all previous revisions. + Purge(key string, opts ...DeleteOpt) error + // Watch for any updates to keys that match the keys argument which could include wildcards. + // Watch will send a nil entry when it has received all initial values. + Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) + // WatchAll will invoke the callback for all updates. + WatchAll(opts ...WatchOpt) (KeyWatcher, error) + // Keys will return all keys. 
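+	//
+	// Illustrative sketch (bucket assumed to exist); an empty bucket
+	// yields ErrNoKeysFound:
+	//
+	//	keys, err := kv.Keys()
+	//	if err == nil {
+	//		fmt.Println(keys)
+	//	}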
+ Keys(opts ...WatchOpt) ([]string, error) + // History will return all historical values for the key. + History(key string, opts ...WatchOpt) ([]KeyValueEntry, error) + // Bucket returns the current bucket name. + Bucket() string + // PurgeDeletes will remove all current delete markers. + PurgeDeletes(opts ...PurgeOpt) error + // Status retrieves the status and configuration of a bucket + Status() (KeyValueStatus, error) +} + +// KeyValueStatus is run-time status about a Key-Value bucket +type KeyValueStatus interface { + // Bucket the name of the bucket + Bucket() string + + // Values is how many messages are in the bucket, including historical values + Values() uint64 + + // History returns the configured history kept per key + History() int64 + + // TTL is how long the bucket keeps values for + TTL() time.Duration + + // BackingStore indicates what technology is used for storage of the bucket + BackingStore() string + + // Bytes returns the size in bytes of the bucket + Bytes() uint64 +} + +// KeyWatcher is what is returned when doing a watch. +type KeyWatcher interface { + // Context returns watcher context optionally provided by nats.Context option. + Context() context.Context + // Updates returns a channel to read any updates to entries. + Updates() <-chan KeyValueEntry + // Stop will stop this watcher. + Stop() error +} + +type WatchOpt interface { + configureWatcher(opts *watchOpts) error +} + +// For nats.Context() support. +func (ctx ContextOpt) configureWatcher(opts *watchOpts) error { + opts.ctx = ctx + return nil +} + +type watchOpts struct { + ctx context.Context + // Do not send delete markers to the update channel. + ignoreDeletes bool + // Include all history per subject, not just last one. + includeHistory bool + // Include only updates for keys. + updatesOnly bool + // retrieve only the meta data of the entry + metaOnly bool +} + +type watchOptFn func(opts *watchOpts) error + +func (opt watchOptFn) configureWatcher(opts *watchOpts) error { + return opt(opts) +} + +// IncludeHistory instructs the key watcher to include historical values as well. +func IncludeHistory() WatchOpt { + return watchOptFn(func(opts *watchOpts) error { + if opts.updatesOnly { + return errors.New("nats: include history can not be used with updates only") + } + opts.includeHistory = true + return nil + }) +} + +// UpdatesOnly instructs the key watcher to only include updates on values (without latest values when started). +func UpdatesOnly() WatchOpt { + return watchOptFn(func(opts *watchOpts) error { + if opts.includeHistory { + return errors.New("nats: updates only can not be used with include history") + } + opts.updatesOnly = true + return nil + }) +} + +// IgnoreDeletes will have the key watcher not pass any deleted keys. +func IgnoreDeletes() WatchOpt { + return watchOptFn(func(opts *watchOpts) error { + opts.ignoreDeletes = true + return nil + }) +} + +// MetaOnly instructs the key watcher to retrieve only the entry meta data, not the entry value +func MetaOnly() WatchOpt { + return watchOptFn(func(opts *watchOpts) error { + opts.metaOnly = true + return nil + }) +} + +type PurgeOpt interface { + configurePurge(opts *purgeOpts) error +} + +type purgeOpts struct { + dmthr time.Duration // Delete markers threshold + ctx context.Context +} + +// DeleteMarkersOlderThan indicates that delete or purge markers older than that +// will be deleted as part of PurgeDeletes() operation, otherwise, only the data +// will be removed but markers that are recent will be kept. 
+// Note that if no option is specified, the default is 30 minutes. You can set +// this option to a negative value to instruct to always remove the markers, +// regardless of their age. +type DeleteMarkersOlderThan time.Duration + +func (ttl DeleteMarkersOlderThan) configurePurge(opts *purgeOpts) error { + opts.dmthr = time.Duration(ttl) + return nil +} + +// For nats.Context() support. +func (ctx ContextOpt) configurePurge(opts *purgeOpts) error { + opts.ctx = ctx + return nil +} + +type DeleteOpt interface { + configureDelete(opts *deleteOpts) error +} + +type deleteOpts struct { + // Remove all previous revisions. + purge bool + + // Delete only if the latest revision matches. + revision uint64 +} + +type deleteOptFn func(opts *deleteOpts) error + +func (opt deleteOptFn) configureDelete(opts *deleteOpts) error { + return opt(opts) +} + +// LastRevision deletes if the latest revision matches. +func LastRevision(revision uint64) DeleteOpt { + return deleteOptFn(func(opts *deleteOpts) error { + opts.revision = revision + return nil + }) +} + +// purge removes all previous revisions. +func purge() DeleteOpt { + return deleteOptFn(func(opts *deleteOpts) error { + opts.purge = true + return nil + }) +} + +// KeyValueConfig is for configuring a KeyValue store. +type KeyValueConfig struct { + Bucket string + Description string + MaxValueSize int32 + History uint8 + TTL time.Duration + MaxBytes int64 + Storage StorageType + Replicas int + Placement *Placement + RePublish *RePublish + Mirror *StreamSource + Sources []*StreamSource +} + +// Used to watch all keys. +const ( + KeyValueMaxHistory = 64 + AllKeys = ">" + kvLatestRevision = 0 + kvop = "KV-Operation" + kvdel = "DEL" + kvpurge = "PURGE" +) + +type KeyValueOp uint8 + +const ( + KeyValuePut KeyValueOp = iota + KeyValueDelete + KeyValuePurge +) + +func (op KeyValueOp) String() string { + switch op { + case KeyValuePut: + return "KeyValuePutOp" + case KeyValueDelete: + return "KeyValueDeleteOp" + case KeyValuePurge: + return "KeyValuePurgeOp" + default: + return "Unknown Operation" + } +} + +// KeyValueEntry is a retrieved entry for Get or List or Watch. +type KeyValueEntry interface { + // Bucket is the bucket the data was loaded from. + Bucket() string + // Key is the key that was retrieved. + Key() string + // Value is the retrieved value. + Value() []byte + // Revision is a unique sequence for this value. + Revision() uint64 + // Created is the time the data was put in the bucket. + Created() time.Time + // Delta is distance from the latest value. + Delta() uint64 + // Operation returns Put or Delete or Purge. + Operation() KeyValueOp +} + +// Errors +var ( + ErrKeyValueConfigRequired = errors.New("nats: config required") + ErrInvalidBucketName = errors.New("nats: invalid bucket name") + ErrInvalidKey = errors.New("nats: invalid key") + ErrBucketNotFound = errors.New("nats: bucket not found") + ErrBadBucket = errors.New("nats: bucket not valid key-value store") + ErrKeyNotFound = errors.New("nats: key not found") + ErrKeyDeleted = errors.New("nats: key was deleted") + ErrHistoryToLarge = errors.New("nats: history limited to a max of 64") + ErrNoKeysFound = errors.New("nats: no keys found") +) + +var ( + ErrKeyExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamWrongLastSequence, Code: 400}, message: "key exists"} +) + +const ( + kvBucketNamePre = "KV_" + kvBucketNameTmpl = "KV_%s" + kvSubjectsTmpl = "$KV.%s.>" + kvSubjectsPreTmpl = "$KV.%s." + kvSubjectsPreDomainTmpl = "%s.$KV.%s." 
+ kvNoPending = "0" +) + +// Regex for valid keys and buckets. +var ( + validBucketRe = regexp.MustCompile(`\A[a-zA-Z0-9_-]+\z`) + validKeyRe = regexp.MustCompile(`\A[-/_=\.a-zA-Z0-9]+\z`) +) + +// KeyValue will lookup and bind to an existing KeyValue store. +func (js *js) KeyValue(bucket string) (KeyValue, error) { + if !js.nc.serverMinVersion(2, 6, 2) { + return nil, errors.New("nats: key-value requires at least server version 2.6.2") + } + if !validBucketRe.MatchString(bucket) { + return nil, ErrInvalidBucketName + } + stream := fmt.Sprintf(kvBucketNameTmpl, bucket) + si, err := js.StreamInfo(stream) + if err != nil { + if err == ErrStreamNotFound { + err = ErrBucketNotFound + } + return nil, err + } + // Do some quick sanity checks that this is a correctly formed stream for KV. + // Max msgs per subject should be > 0. + if si.Config.MaxMsgsPerSubject < 1 { + return nil, ErrBadBucket + } + + return mapStreamToKVS(js, si), nil +} + +// CreateKeyValue will create a KeyValue store with the following configuration. +func (js *js) CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) { + if !js.nc.serverMinVersion(2, 6, 2) { + return nil, errors.New("nats: key-value requires at least server version 2.6.2") + } + if cfg == nil { + return nil, ErrKeyValueConfigRequired + } + if !validBucketRe.MatchString(cfg.Bucket) { + return nil, ErrInvalidBucketName + } + if _, err := js.AccountInfo(); err != nil { + return nil, err + } + + // Default to 1 for history. Max is 64 for now. + history := int64(1) + if cfg.History > 0 { + if cfg.History > KeyValueMaxHistory { + return nil, ErrHistoryToLarge + } + history = int64(cfg.History) + } + + replicas := cfg.Replicas + if replicas == 0 { + replicas = 1 + } + + // We will set explicitly some values so that we can do comparison + // if we get an "already in use" error and need to check if it is same. + maxBytes := cfg.MaxBytes + if maxBytes == 0 { + maxBytes = -1 + } + maxMsgSize := cfg.MaxValueSize + if maxMsgSize == 0 { + maxMsgSize = -1 + } + // When stream's MaxAge is not set, server uses 2 minutes as the default + // for the duplicate window. If MaxAge is set, and lower than 2 minutes, + // then the duplicate window will be set to that. If MaxAge is greater, + // we will cap the duplicate window to 2 minutes (to be consistent with + // previous behavior). + duplicateWindow := 2 * time.Minute + if cfg.TTL > 0 && cfg.TTL < duplicateWindow { + duplicateWindow = cfg.TTL + } + scfg := &StreamConfig{ + Name: fmt.Sprintf(kvBucketNameTmpl, cfg.Bucket), + Description: cfg.Description, + MaxMsgsPerSubject: history, + MaxBytes: maxBytes, + MaxAge: cfg.TTL, + MaxMsgSize: maxMsgSize, + Storage: cfg.Storage, + Replicas: replicas, + Placement: cfg.Placement, + AllowRollup: true, + DenyDelete: true, + Duplicates: duplicateWindow, + MaxMsgs: -1, + MaxConsumers: -1, + AllowDirect: true, + RePublish: cfg.RePublish, + } + if cfg.Mirror != nil { + // Copy in case we need to make changes so we do not change caller's version. 
+ m := cfg.Mirror.copy() + if !strings.HasPrefix(m.Name, kvBucketNamePre) { + m.Name = fmt.Sprintf(kvBucketNameTmpl, m.Name) + } + scfg.Mirror = m + scfg.MirrorDirect = true + } else if len(cfg.Sources) > 0 { + for _, ss := range cfg.Sources { + var sourceBucketName string + if strings.HasPrefix(ss.Name, kvBucketNamePre) { + sourceBucketName = ss.Name[len(kvBucketNamePre):] + } else { + sourceBucketName = ss.Name + ss.Name = fmt.Sprintf(kvBucketNameTmpl, ss.Name) + } + + if ss.External == nil || sourceBucketName != cfg.Bucket { + ss.SubjectTransforms = []SubjectTransformConfig{{Source: fmt.Sprintf(kvSubjectsTmpl, sourceBucketName), Destination: fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}} + } + scfg.Sources = append(scfg.Sources, ss) + } + scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} + } else { + scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} + } + + // If we are at server version 2.7.2 or above use DiscardNew. We can not use DiscardNew for 2.7.1 or below. + if js.nc.serverMinVersion(2, 7, 2) { + scfg.Discard = DiscardNew + } + + si, err := js.AddStream(scfg) + if err != nil { + // If we have a failure to add, it could be because we have + // a config change if the KV was created against a pre 2.7.2 + // and we are now moving to a v2.7.2+. If that is the case + // and the only difference is the discard policy, then update + // the stream. + // The same logic applies for KVs created pre 2.9.x and + // the AllowDirect setting. + if err == ErrStreamNameAlreadyInUse { + if si, _ = js.StreamInfo(scfg.Name); si != nil { + // To compare, make the server's stream info discard + // policy same than ours. + si.Config.Discard = scfg.Discard + // Also need to set allow direct for v2.9.x+ + si.Config.AllowDirect = scfg.AllowDirect + if reflect.DeepEqual(&si.Config, scfg) { + si, err = js.UpdateStream(scfg) + } + } + } + if err != nil { + return nil, err + } + } + return mapStreamToKVS(js, si), nil +} + +// DeleteKeyValue will delete this KeyValue store (JetStream stream). +func (js *js) DeleteKeyValue(bucket string) error { + if !validBucketRe.MatchString(bucket) { + return ErrInvalidBucketName + } + stream := fmt.Sprintf(kvBucketNameTmpl, bucket) + return js.DeleteStream(stream) +} + +type kvs struct { + name string + stream string + pre string + putPre string + js *js + // If true, it means that APIPrefix/Domain was set in the context + // and we need to add something to some of our high level protocols + // (such as Put, etc..) + useJSPfx bool + // To know if we can use the stream direct get API + useDirect bool +} + +// Underlying entry. +type kve struct { + bucket string + key string + value []byte + revision uint64 + delta uint64 + created time.Time + op KeyValueOp +} + +func (e *kve) Bucket() string { return e.bucket } +func (e *kve) Key() string { return e.key } +func (e *kve) Value() []byte { return e.value } +func (e *kve) Revision() uint64 { return e.revision } +func (e *kve) Created() time.Time { return e.created } +func (e *kve) Delta() uint64 { return e.delta } +func (e *kve) Operation() KeyValueOp { return e.op } + +func keyValid(key string) bool { + if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' { + return false + } + return validKeyRe.MatchString(key) +} + +// Get returns the latest value for the key. 
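+//
+// Illustrative put/get round trip, assuming a bucket named "profiles":
+//
+//	kv, _ := js.KeyValue("profiles")
+//	rev, _ := kv.Put("user.1", []byte("alice"))
+//	entry, _ := kv.Get("user.1")
+//	_ = rev == entry.Revision() // true immediately after the Put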
+func (kv *kvs) Get(key string) (KeyValueEntry, error) { + e, err := kv.get(key, kvLatestRevision) + if err != nil { + if err == ErrKeyDeleted { + return nil, ErrKeyNotFound + } + return nil, err + } + + return e, nil +} + +// GetRevision returns a specific revision value for the key. +func (kv *kvs) GetRevision(key string, revision uint64) (KeyValueEntry, error) { + e, err := kv.get(key, revision) + if err != nil { + if err == ErrKeyDeleted { + return nil, ErrKeyNotFound + } + return nil, err + } + + return e, nil +} + +func (kv *kvs) get(key string, revision uint64) (KeyValueEntry, error) { + if !keyValid(key) { + return nil, ErrInvalidKey + } + + var b strings.Builder + b.WriteString(kv.pre) + b.WriteString(key) + + var m *RawStreamMsg + var err error + var _opts [1]JSOpt + opts := _opts[:0] + if kv.useDirect { + opts = append(opts, DirectGet()) + } + + if revision == kvLatestRevision { + m, err = kv.js.GetLastMsg(kv.stream, b.String(), opts...) + } else { + m, err = kv.js.GetMsg(kv.stream, revision, opts...) + // If a sequence was provided, just make sure that the retrieved + // message subject matches the request. + if err == nil && m.Subject != b.String() { + return nil, ErrKeyNotFound + } + } + if err != nil { + if err == ErrMsgNotFound { + err = ErrKeyNotFound + } + return nil, err + } + + entry := &kve{ + bucket: kv.name, + key: key, + value: m.Data, + revision: m.Sequence, + created: m.Time, + } + + // Double check here that this is not a DEL Operation marker. + if len(m.Header) > 0 { + switch m.Header.Get(kvop) { + case kvdel: + entry.op = KeyValueDelete + return entry, ErrKeyDeleted + case kvpurge: + entry.op = KeyValuePurge + return entry, ErrKeyDeleted + } + } + + return entry, nil +} + +// Put will place the new value for the key into the store. +func (kv *kvs) Put(key string, value []byte) (revision uint64, err error) { + if !keyValid(key) { + return 0, ErrInvalidKey + } + + var b strings.Builder + if kv.useJSPfx { + b.WriteString(kv.js.opts.pre) + } + if kv.putPre != _EMPTY_ { + b.WriteString(kv.putPre) + } else { + b.WriteString(kv.pre) + } + b.WriteString(key) + + pa, err := kv.js.Publish(b.String(), value) + if err != nil { + return 0, err + } + return pa.Sequence, err +} + +// PutString will place the string for the key into the store. +func (kv *kvs) PutString(key string, value string) (revision uint64, err error) { + return kv.Put(key, []byte(value)) +} + +// Create will add the key/value pair if it does not exist. +func (kv *kvs) Create(key string, value []byte) (revision uint64, err error) { + v, err := kv.Update(key, value, 0) + if err == nil { + return v, nil + } + + // TODO(dlc) - Since we have tombstones for DEL ops for watchers, this could be from that + // so we need to double check. + if e, err := kv.get(key, kvLatestRevision); err == ErrKeyDeleted { + return kv.Update(key, value, e.Revision()) + } + + // Check if the expected last subject sequence is not zero which implies + // the key already exists. + if errors.Is(err, ErrKeyExists) { + jserr := ErrKeyExists.(*jsError) + return 0, fmt.Errorf("%w: %s", err, jserr.message) + } + + return 0, err +} + +// Update will update the value if the latest revision matches. 
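+//
+// A typical optimistic-concurrency loop looks roughly like this (sketch;
+// "counter" and nextValue are hypothetical):
+//
+//	entry, _ := kv.Get("counter")
+//	if _, err := kv.Update("counter", nextValue, entry.Revision()); err != nil {
+//		// Another writer advanced the revision first; re-read and retry.
+//	}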
+func (kv *kvs) Update(key string, value []byte, revision uint64) (uint64, error) { + if !keyValid(key) { + return 0, ErrInvalidKey + } + + var b strings.Builder + if kv.useJSPfx { + b.WriteString(kv.js.opts.pre) + } + b.WriteString(kv.pre) + b.WriteString(key) + + m := Msg{Subject: b.String(), Header: Header{}, Data: value} + m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(revision, 10)) + + pa, err := kv.js.PublishMsg(&m) + if err != nil { + return 0, err + } + return pa.Sequence, err +} + +// Delete will place a delete marker and leave all revisions. +func (kv *kvs) Delete(key string, opts ...DeleteOpt) error { + if !keyValid(key) { + return ErrInvalidKey + } + + var b strings.Builder + if kv.useJSPfx { + b.WriteString(kv.js.opts.pre) + } + if kv.putPre != _EMPTY_ { + b.WriteString(kv.putPre) + } else { + b.WriteString(kv.pre) + } + b.WriteString(key) + + // DEL op marker. For watch functionality. + m := NewMsg(b.String()) + + var o deleteOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureDelete(&o); err != nil { + return err + } + } + } + + if o.purge { + m.Header.Set(kvop, kvpurge) + m.Header.Set(MsgRollup, MsgRollupSubject) + } else { + m.Header.Set(kvop, kvdel) + } + + if o.revision != 0 { + m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(o.revision, 10)) + } + + _, err := kv.js.PublishMsg(m) + return err +} + +// Purge will remove the key and all revisions. +func (kv *kvs) Purge(key string, opts ...DeleteOpt) error { + return kv.Delete(key, append(opts, purge())...) +} + +const kvDefaultPurgeDeletesMarkerThreshold = 30 * time.Minute + +// PurgeDeletes will remove all current delete markers. +// This is a maintenance option if there is a larger buildup of delete markers. +// See DeleteMarkersOlderThan() option for more information. +func (kv *kvs) PurgeDeletes(opts ...PurgeOpt) error { + var o purgeOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configurePurge(&o); err != nil { + return err + } + } + } + // Transfer possible context purge option to the watcher. This is the + // only option that matters for the PurgeDeletes() feature. + var wopts []WatchOpt + if o.ctx != nil { + wopts = append(wopts, Context(o.ctx)) + } + watcher, err := kv.WatchAll(wopts...) + if err != nil { + return err + } + defer watcher.Stop() + + var limit time.Time + olderThan := o.dmthr + // Negative value is used to instruct to always remove markers, regardless + // of age. If set to 0 (or not set), use our default value. + if olderThan == 0 { + olderThan = kvDefaultPurgeDeletesMarkerThreshold + } + if olderThan > 0 { + limit = time.Now().Add(-olderThan) + } + + var deleteMarkers []KeyValueEntry + for entry := range watcher.Updates() { + if entry == nil { + break + } + if op := entry.Operation(); op == KeyValueDelete || op == KeyValuePurge { + deleteMarkers = append(deleteMarkers, entry) + } + } + + var ( + pr StreamPurgeRequest + b strings.Builder + ) + // Do actual purges here. + for _, entry := range deleteMarkers { + b.WriteString(kv.pre) + b.WriteString(entry.Key()) + pr.Subject = b.String() + pr.Keep = 0 + if olderThan > 0 && entry.Created().After(limit) { + pr.Keep = 1 + } + if err := kv.js.purgeStream(kv.stream, &pr); err != nil { + return err + } + b.Reset() + } + return nil +} + +// Keys() will return all keys. +func (kv *kvs) Keys(opts ...WatchOpt) ([]string, error) { + opts = append(opts, IgnoreDeletes(), MetaOnly()) + watcher, err := kv.WatchAll(opts...) 
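+	// The options above make this a meta-only watcher that skips delete
+	// and purge markers, so draining it yields exactly the live key names.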
+ if err != nil { + return nil, err + } + defer watcher.Stop() + + var keys []string + for entry := range watcher.Updates() { + if entry == nil { + break + } + keys = append(keys, entry.Key()) + } + if len(keys) == 0 { + return nil, ErrNoKeysFound + } + return keys, nil +} + +// History will return all values for the key. +func (kv *kvs) History(key string, opts ...WatchOpt) ([]KeyValueEntry, error) { + opts = append(opts, IncludeHistory()) + watcher, err := kv.Watch(key, opts...) + if err != nil { + return nil, err + } + defer watcher.Stop() + + var entries []KeyValueEntry + for entry := range watcher.Updates() { + if entry == nil { + break + } + entries = append(entries, entry) + } + if len(entries) == 0 { + return nil, ErrKeyNotFound + } + return entries, nil +} + +// Implementation for Watch +type watcher struct { + mu sync.Mutex + updates chan KeyValueEntry + sub *Subscription + initDone bool + initPending uint64 + received uint64 + ctx context.Context +} + +// Context returns the context for the watcher if set. +func (w *watcher) Context() context.Context { + if w == nil { + return nil + } + return w.ctx +} + +// Updates returns the interior channel. +func (w *watcher) Updates() <-chan KeyValueEntry { + if w == nil { + return nil + } + return w.updates +} + +// Stop will unsubscribe from the watcher. +func (w *watcher) Stop() error { + if w == nil { + return nil + } + return w.sub.Unsubscribe() +} + +// WatchAll watches all keys. +func (kv *kvs) WatchAll(opts ...WatchOpt) (KeyWatcher, error) { + return kv.Watch(AllKeys, opts...) +} + +// Watch will fire the callback when a key that matches the keys pattern is updated. +// keys needs to be a valid NATS subject. +func (kv *kvs) Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) { + var o watchOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureWatcher(&o); err != nil { + return nil, err + } + } + } + + // Could be a pattern so don't check for validity as we normally do. + var b strings.Builder + b.WriteString(kv.pre) + b.WriteString(keys) + keys = b.String() + + // We will block below on placing items on the chan. That is by design. + w := &watcher{updates: make(chan KeyValueEntry, 256), ctx: o.ctx} + + update := func(m *Msg) { + tokens, err := parser.GetMetadataFields(m.Reply) + if err != nil { + return + } + if len(m.Subject) <= len(kv.pre) { + return + } + subj := m.Subject[len(kv.pre):] + + var op KeyValueOp + if len(m.Header) > 0 { + switch m.Header.Get(kvop) { + case kvdel: + op = KeyValueDelete + case kvpurge: + op = KeyValuePurge + } + } + delta := parser.ParseNum(tokens[parser.AckNumPendingTokenPos]) + w.mu.Lock() + defer w.mu.Unlock() + if !o.ignoreDeletes || (op != KeyValueDelete && op != KeyValuePurge) { + entry := &kve{ + bucket: kv.name, + key: subj, + value: m.Data, + revision: parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]), + created: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))), + delta: delta, + op: op, + } + w.updates <- entry + } + // Check if done and initial values. + // Skip if UpdatesOnly() is set, since there will never be updates initially. + if !w.initDone { + w.received++ + // We set this on the first trip through.. + if w.initPending == 0 { + w.initPending = delta + } + if w.received > w.initPending || delta == 0 { + w.initDone = true + w.updates <- nil + } + } + } + + // Used ordered consumer to deliver results. 
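+	// (Ordered consumers are ephemeral, deliver messages in stream sequence
+	// and are re-created by the library on any detected gap.)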
+ subOpts := []SubOpt{BindStream(kv.stream), OrderedConsumer()} + if !o.includeHistory { + subOpts = append(subOpts, DeliverLastPerSubject()) + } + if o.updatesOnly { + subOpts = append(subOpts, DeliverNew()) + } + if o.metaOnly { + subOpts = append(subOpts, HeadersOnly()) + } + if o.ctx != nil { + subOpts = append(subOpts, Context(o.ctx)) + } + // Create the sub and rest of initialization under the lock. + // We want to prevent the race between this code and the + // update() callback. + w.mu.Lock() + defer w.mu.Unlock() + sub, err := kv.js.Subscribe(keys, update, subOpts...) + if err != nil { + return nil, err + } + sub.mu.Lock() + // If there were no pending messages at the time of the creation + // of the consumer, send the marker. + // Skip if UpdatesOnly() is set, since there will never be updates initially. + if !o.updatesOnly { + if sub.jsi != nil && sub.jsi.pending == 0 { + w.initDone = true + w.updates <- nil + } + } else { + // if UpdatesOnly was used, mark initialization as complete + w.initDone = true + } + // Set us up to close when the waitForMessages func returns. + sub.pDone = func(_ string) { + close(w.updates) + } + sub.mu.Unlock() + + w.sub = sub + return w, nil +} + +// Bucket returns the current bucket name (JetStream stream). +func (kv *kvs) Bucket() string { + return kv.name +} + +// KeyValueBucketStatus represents status of a Bucket, implements KeyValueStatus +type KeyValueBucketStatus struct { + nfo *StreamInfo + bucket string +} + +// Bucket the name of the bucket +func (s *KeyValueBucketStatus) Bucket() string { return s.bucket } + +// Values is how many messages are in the bucket, including historical values +func (s *KeyValueBucketStatus) Values() uint64 { return s.nfo.State.Msgs } + +// History returns the configured history kept per key +func (s *KeyValueBucketStatus) History() int64 { return s.nfo.Config.MaxMsgsPerSubject } + +// TTL is how long the bucket keeps values for +func (s *KeyValueBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } + +// BackingStore indicates what technology is used for storage of the bucket +func (s *KeyValueBucketStatus) BackingStore() string { return "JetStream" } + +// StreamInfo is the stream info retrieved to create the status +func (s *KeyValueBucketStatus) StreamInfo() *StreamInfo { return s.nfo } + +// Bytes is the size of the stream +func (s *KeyValueBucketStatus) Bytes() uint64 { return s.nfo.State.Bytes } + +// Status retrieves the status and configuration of a bucket +func (kv *kvs) Status() (KeyValueStatus, error) { + nfo, err := kv.js.StreamInfo(kv.stream) + if err != nil { + return nil, err + } + + return &KeyValueBucketStatus{nfo: nfo, bucket: kv.name}, nil +} + +// KeyValueStoreNames is used to retrieve a list of key value store names +func (js *js) KeyValueStoreNames() <-chan string { + ch := make(chan string) + l := &streamNamesLister{js: js} + l.js.opts.streamListSubject = fmt.Sprintf(kvSubjectsTmpl, "*") + go func() { + defer close(ch) + for l.Next() { + for _, name := range l.Page() { + if !strings.HasPrefix(name, kvBucketNamePre) { + continue + } + ch <- name + } + } + }() + + return ch +} + +// KeyValueStores is used to retrieve a list of key value store statuses +func (js *js) KeyValueStores() <-chan KeyValueStatus { + ch := make(chan KeyValueStatus) + l := &streamLister{js: js} + l.js.opts.streamListSubject = fmt.Sprintf(kvSubjectsTmpl, "*") + go func() { + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + if !strings.HasPrefix(info.Config.Name, kvBucketNamePre) { + 
continue
+				}
+				ch <- &KeyValueBucketStatus{nfo: info, bucket: strings.TrimPrefix(info.Config.Name, kvBucketNamePre)}
+			}
+		}
+	}()
+	return ch
+}
+
+func mapStreamToKVS(js *js, info *StreamInfo) *kvs {
+	bucket := strings.TrimPrefix(info.Config.Name, kvBucketNamePre)
+
+	kv := &kvs{
+		name:   bucket,
+		stream: info.Config.Name,
+		pre:    fmt.Sprintf(kvSubjectsPreTmpl, bucket),
+		js:     js,
+		// Determine if we need to use the JS prefix in front of Put and Delete operations
+		useJSPfx:  js.opts.pre != defaultAPIPrefix,
+		useDirect: info.Config.AllowDirect,
+	}
+
+	// If we are mirroring, we will have mirror direct on, so just use the mirror name
+	// and override the subject prefixes accordingly.
+	if m := info.Config.Mirror; m != nil {
+		bucket := strings.TrimPrefix(m.Name, kvBucketNamePre)
+		if m.External != nil && m.External.APIPrefix != _EMPTY_ {
+			kv.useJSPfx = false
+			kv.pre = fmt.Sprintf(kvSubjectsPreTmpl, bucket)
+			kv.putPre = fmt.Sprintf(kvSubjectsPreDomainTmpl, m.External.APIPrefix, bucket)
+		} else {
+			kv.putPre = fmt.Sprintf(kvSubjectsPreTmpl, bucket)
+		}
+	}
+
+	return kv
+}
diff --git a/vendor/github.com/nats-io/nats.go/legacy_jetstream.md b/vendor/github.com/nats-io/nats.go/legacy_jetstream.md
new file mode 100644
index 00000000..43e1c73b
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/legacy_jetstream.md
@@ -0,0 +1,83 @@
+# Legacy JetStream API
+
+This is the documentation for the legacy JetStream API. A README for the current
+API can be found [here](jetstream/README.md).
+
+## JetStream Basic Usage
+
+```go
+import "github.com/nats-io/nats.go"
+
+// Connect to NATS
+nc, _ := nats.Connect(nats.DefaultURL)
+
+// Create JetStream Context
+js, _ := nc.JetStream(nats.PublishAsyncMaxPending(256))
+
+// Simple Stream Publisher
+js.Publish("ORDERS.scratch", []byte("hello"))
+
+// Simple Async Stream Publisher
+for i := 0; i < 500; i++ {
+	js.PublishAsync("ORDERS.scratch", []byte("hello"))
+}
+select {
+case <-js.PublishAsyncComplete():
+case <-time.After(5 * time.Second):
+	fmt.Println("Did not resolve in time")
+}
+
+// Simple Async Ephemeral Consumer
+js.Subscribe("ORDERS.*", func(m *nats.Msg) {
+	fmt.Printf("Received a JetStream message: %s\n", string(m.Data))
+})
+
+// Simple Sync Durable Consumer (optional SubOpts at the end)
+sub, err := js.SubscribeSync("ORDERS.*", nats.Durable("MONITOR"), nats.MaxDeliver(3))
+m, err := sub.NextMsg(timeout)
+
+// Simple Pull Consumer
+sub, err := js.PullSubscribe("ORDERS.*", "MONITOR")
+msgs, err := sub.Fetch(10)
+
+// Unsubscribe
+sub.Unsubscribe()
+
+// Drain
+sub.Drain()
+```
+
+## JetStream Basic Management
+
+```go
+import "github.com/nats-io/nats.go"
+
+// Connect to NATS
+nc, _ := nats.Connect(nats.DefaultURL)
+
+// Create JetStream Context
+js, _ := nc.JetStream()
+
+// Create a Stream
+js.AddStream(&nats.StreamConfig{
+	Name:     "ORDERS",
+	Subjects: []string{"ORDERS.*"},
+})
+
+// Update a Stream
+js.UpdateStream(&nats.StreamConfig{
+	Name:     "ORDERS",
+	MaxBytes: 8,
+})
+
+// Create a Consumer
+js.AddConsumer("ORDERS", &nats.ConsumerConfig{
+	Durable: "MONITOR",
+})
+
+// Delete Consumer
+js.DeleteConsumer("ORDERS", "MONITOR")
+
+// Delete Stream
+js.DeleteStream("ORDERS")
+```
diff --git a/vendor/github.com/nats-io/nats.go/nats.go b/vendor/github.com/nats-io/nats.go/nats.go
new file mode 100644
index 00000000..da13692f
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/nats.go
@@ -0,0 +1,5688 @@
+// Copyright 2012-2023 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A Go client for the NATS messaging system (https://nats.io). +package nats + +import ( + "bufio" + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math/rand" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/nats-io/nkeys" + "github.com/nats-io/nuid" + + "github.com/nats-io/nats.go/util" +) + +// Default Constants +const ( + Version = "1.31.0" + DefaultURL = "nats://127.0.0.1:4222" + DefaultPort = 4222 + DefaultMaxReconnect = 60 + DefaultReconnectWait = 2 * time.Second + DefaultReconnectJitter = 100 * time.Millisecond + DefaultReconnectJitterTLS = time.Second + DefaultTimeout = 2 * time.Second + DefaultPingInterval = 2 * time.Minute + DefaultMaxPingOut = 2 + DefaultMaxChanLen = 64 * 1024 // 64k + DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB + RequestChanLen = 8 + DefaultDrainTimeout = 30 * time.Second + DefaultFlusherTimeout = time.Minute + LangString = "go" +) + +const ( + // STALE_CONNECTION is for detection and proper handling of stale connections. + STALE_CONNECTION = "stale connection" + + // PERMISSIONS_ERR is for when nats server subject authorization has failed. + PERMISSIONS_ERR = "permissions violation" + + // AUTHORIZATION_ERR is for when nats server user authorization has failed. + AUTHORIZATION_ERR = "authorization violation" + + // AUTHENTICATION_EXPIRED_ERR is for when nats server user authorization has expired. + AUTHENTICATION_EXPIRED_ERR = "user authentication expired" + + // AUTHENTICATION_REVOKED_ERR is for when user authorization has been revoked. + AUTHENTICATION_REVOKED_ERR = "user authentication revoked" + + // ACCOUNT_AUTHENTICATION_EXPIRED_ERR is for when nats server account authorization has expired. 
+ ACCOUNT_AUTHENTICATION_EXPIRED_ERR = "account authentication expired" + + // MAX_CONNECTIONS_ERR is for when nats server denies the connection due to server max_connections limit + MAX_CONNECTIONS_ERR = "maximum connections exceeded" +) + +// Errors +var ( + ErrConnectionClosed = errors.New("nats: connection closed") + ErrConnectionDraining = errors.New("nats: connection draining") + ErrDrainTimeout = errors.New("nats: draining connection timed out") + ErrConnectionReconnecting = errors.New("nats: connection reconnecting") + ErrSecureConnRequired = errors.New("nats: secure connection required") + ErrSecureConnWanted = errors.New("nats: secure connection not available") + ErrBadSubscription = errors.New("nats: invalid subscription") + ErrTypeSubscription = errors.New("nats: invalid subscription type") + ErrBadSubject = errors.New("nats: invalid subject") + ErrBadQueueName = errors.New("nats: invalid queue name") + ErrSlowConsumer = errors.New("nats: slow consumer, messages dropped") + ErrTimeout = errors.New("nats: timeout") + ErrBadTimeout = errors.New("nats: timeout invalid") + ErrAuthorization = errors.New("nats: authorization violation") + ErrAuthExpired = errors.New("nats: authentication expired") + ErrAuthRevoked = errors.New("nats: authentication revoked") + ErrAccountAuthExpired = errors.New("nats: account authentication expired") + ErrNoServers = errors.New("nats: no servers available for connection") + ErrJsonParse = errors.New("nats: connect message, json parse error") + ErrChanArg = errors.New("nats: argument needs to be a channel type") + ErrMaxPayload = errors.New("nats: maximum payload exceeded") + ErrMaxMessages = errors.New("nats: maximum messages delivered") + ErrSyncSubRequired = errors.New("nats: illegal call on an async subscription") + ErrMultipleTLSConfigs = errors.New("nats: multiple tls.Configs not allowed") + ErrNoInfoReceived = errors.New("nats: protocol exception, INFO not received") + ErrReconnectBufExceeded = errors.New("nats: outbound buffer limit exceeded") + ErrInvalidConnection = errors.New("nats: invalid connection") + ErrInvalidMsg = errors.New("nats: invalid message or message nil") + ErrInvalidArg = errors.New("nats: invalid argument") + ErrInvalidContext = errors.New("nats: invalid context") + ErrNoDeadlineContext = errors.New("nats: context requires a deadline") + ErrNoEchoNotSupported = errors.New("nats: no echo option not supported by this server") + ErrClientIDNotSupported = errors.New("nats: client ID not supported by this server") + ErrUserButNoSigCB = errors.New("nats: user callback defined without a signature handler") + ErrNkeyButNoSigCB = errors.New("nats: nkey defined without a signature handler") + ErrNoUserCB = errors.New("nats: user callback not defined") + ErrNkeyAndUser = errors.New("nats: user callback and nkey defined") + ErrNkeysNotSupported = errors.New("nats: nkeys not supported by the server") + ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION) + ErrTokenAlreadySet = errors.New("nats: token and token handler both set") + ErrMsgNotBound = errors.New("nats: message is not bound to subscription/connection") + ErrMsgNoReply = errors.New("nats: message does not have a reply") + ErrClientIPNotSupported = errors.New("nats: client IP not supported by this server") + ErrDisconnected = errors.New("nats: server is disconnected") + ErrHeadersNotSupported = errors.New("nats: headers not supported by this server") + ErrBadHeaderMsg = errors.New("nats: message could not decode headers") + ErrNoResponders = errors.New("nats: no 
responders available for request") + ErrMaxConnectionsExceeded = errors.New("nats: server maximum connections exceeded") + ErrConnectionNotTLS = errors.New("nats: connection is not tls") +) + +// GetDefaultOptions returns default configuration options for the client. +func GetDefaultOptions() Options { + return Options{ + AllowReconnect: true, + MaxReconnect: DefaultMaxReconnect, + ReconnectWait: DefaultReconnectWait, + ReconnectJitter: DefaultReconnectJitter, + ReconnectJitterTLS: DefaultReconnectJitterTLS, + Timeout: DefaultTimeout, + PingInterval: DefaultPingInterval, + MaxPingsOut: DefaultMaxPingOut, + SubChanLen: DefaultMaxChanLen, + ReconnectBufSize: DefaultReconnectBufSize, + DrainTimeout: DefaultDrainTimeout, + FlusherTimeout: DefaultFlusherTimeout, + } +} + +// DEPRECATED: Use GetDefaultOptions() instead. +// DefaultOptions is not safe for use by multiple clients. +// For details see #308. +var DefaultOptions = GetDefaultOptions() + +// Status represents the state of the connection. +type Status int + +const ( + DISCONNECTED = Status(iota) + CONNECTED + CLOSED + RECONNECTING + CONNECTING + DRAINING_SUBS + DRAINING_PUBS +) + +func (s Status) String() string { + switch s { + case DISCONNECTED: + return "DISCONNECTED" + case CONNECTED: + return "CONNECTED" + case CLOSED: + return "CLOSED" + case RECONNECTING: + return "RECONNECTING" + case CONNECTING: + return "CONNECTING" + case DRAINING_SUBS: + return "DRAINING_SUBS" + case DRAINING_PUBS: + return "DRAINING_PUBS" + } + return "unknown status" +} + +// ConnHandler is used for asynchronous events such as +// disconnected and closed connections. +type ConnHandler func(*Conn) + +// ConnErrHandler is used to process asynchronous events like +// disconnected connection with the error (if any). +type ConnErrHandler func(*Conn, error) + +// ErrHandler is used to process asynchronous errors encountered +// while processing inbound messages. +type ErrHandler func(*Conn, *Subscription, error) + +// UserJWTHandler is used to fetch and return the account signed +// JWT for this user. +type UserJWTHandler func() (string, error) + +// TLSCertHandler is used to fetch and return tls certificate. +type TLSCertHandler func() (tls.Certificate, error) + +// RootCAsHandler is used to fetch and return a set of root certificate +// authorities that clients use when verifying server certificates. +type RootCAsHandler func() (*x509.CertPool, error) + +// SignatureHandler is used to sign a nonce from the server while +// authenticating with nkeys. The user should sign the nonce and +// return the raw signature. The client will base64 encode this to +// send to the server. +type SignatureHandler func([]byte) ([]byte, error) + +// AuthTokenHandler is used to generate a new token. +type AuthTokenHandler func() string + +// ReconnectDelayHandler is used to get from the user the desired +// delay the library should pause before attempting to reconnect +// again. Note that this is invoked after the library tried the +// whole list of URLs and failed to reconnect. +type ReconnectDelayHandler func(attempts int) time.Duration + +// asyncCB is used to preserve order for async callbacks. +type asyncCB struct { + f func() + next *asyncCB +} + +type asyncCallbacksHandler struct { + mu sync.Mutex + cond *sync.Cond + head *asyncCB + tail *asyncCB +} + +// Option is a function on the options for a connection. +type Option func(*Options) error + +// CustomDialer can be used to specify any dialer, not necessarily a +// *net.Dialer. 
A CustomDialer may also implement `SkipTLSHandshake() bool` +// in order to skip the TLS handshake in case not required. +type CustomDialer interface { + Dial(network, address string) (net.Conn, error) +} + +type InProcessConnProvider interface { + InProcessConn() (net.Conn, error) +} + +// Options can be used to create a customized connection. +type Options struct { + + // Url represents a single NATS server url to which the client + // will be connecting. If the Servers option is also set, it + // then becomes the first server in the Servers array. + Url string + + // InProcessServer represents a NATS server running within the + // same process. If this is set then we will attempt to connect + // to the server directly rather than using external TCP conns. + InProcessServer InProcessConnProvider + + // Servers is a configured set of servers which this client + // will use when attempting to connect. + Servers []string + + // NoRandomize configures whether we will randomize the + // server pool. + NoRandomize bool + + // NoEcho configures whether the server will echo back messages + // that are sent on this connection if we also have matching subscriptions. + // Note this is supported on servers >= version 1.2. Proto 1 or greater. + NoEcho bool + + // Name is an optional name label which will be sent to the server + // on CONNECT to identify the client. + Name string + + // Verbose signals the server to send an OK ack for commands + // successfully processed by the server. + Verbose bool + + // Pedantic signals the server whether it should be doing further + // validation of subjects. + Pedantic bool + + // Secure enables TLS secure connections that skip server + // verification by default. NOT RECOMMENDED. + Secure bool + + // TLSConfig is a custom TLS configuration to use for secure + // transports. + TLSConfig *tls.Config + + // TLSCertCB is used to fetch and return custom tls certificate. + TLSCertCB TLSCertHandler + + // TLSHandshakeFirst is used to instruct the library perform + // the TLS handshake right after the connect and before receiving + // the INFO protocol from the server. If this option is enabled + // but the server is not configured to perform the TLS handshake + // first, the connection will fail. + TLSHandshakeFirst bool + + // RootCAsCB is used to fetch and return a set of root certificate + // authorities that clients use when verifying server certificates. + RootCAsCB RootCAsHandler + + // AllowReconnect enables reconnection logic to be used when we + // encounter a disconnect from the current server. + AllowReconnect bool + + // MaxReconnect sets the number of reconnect attempts that will be + // tried before giving up. If negative, then it will never give up + // trying to reconnect. + // Defaults to 60. + MaxReconnect int + + // ReconnectWait sets the time to backoff after attempting a reconnect + // to a server that we were already connected to previously. + // Defaults to 2s. + ReconnectWait time.Duration + + // CustomReconnectDelayCB is invoked after the library tried every + // URL in the server list and failed to reconnect. It passes to the + // user the current number of attempts. This function returns the + // amount of time the library will sleep before attempting to reconnect + // again. It is strongly recommended that this value contains some + // jitter to prevent all connections to attempt reconnecting at the same time. 
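+	// A sketch of such a callback (the numbers are purely illustrative):
+	//
+	//	nats.Connect(url, nats.CustomReconnectDelay(func(attempts int) time.Duration {
+	//		return 2*time.Second + time.Duration(rand.Int63n(500))*time.Millisecond
+	//	}))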
+ CustomReconnectDelayCB ReconnectDelayHandler + + // ReconnectJitter sets the upper bound for a random delay added to + // ReconnectWait during a reconnect when no TLS is used. + // Defaults to 100ms. + ReconnectJitter time.Duration + + // ReconnectJitterTLS sets the upper bound for a random delay added to + // ReconnectWait during a reconnect when TLS is used. + // Defaults to 1s. + ReconnectJitterTLS time.Duration + + // Timeout sets the timeout for a Dial operation on a connection. + // Defaults to 2s. + Timeout time.Duration + + // DrainTimeout sets the timeout for a Drain Operation to complete. + // Defaults to 30s. + DrainTimeout time.Duration + + // FlusherTimeout is the maximum time to wait for write operations + // to the underlying connection to complete (including the flusher loop). + // Defaults to 1m. + FlusherTimeout time.Duration + + // PingInterval is the period at which the client will be sending ping + // commands to the server, disabled if 0 or negative. + // Defaults to 2m. + PingInterval time.Duration + + // MaxPingsOut is the maximum number of pending ping commands that can + // be awaiting a response before raising an ErrStaleConnection error. + // Defaults to 2. + MaxPingsOut int + + // ClosedCB sets the closed handler that is called when a client will + // no longer be connected. + ClosedCB ConnHandler + + // DisconnectedCB sets the disconnected handler that is called + // whenever the connection is disconnected. + // Will not be called if DisconnectedErrCB is set + // DEPRECATED. Use DisconnectedErrCB which passes error that caused + // the disconnect event. + DisconnectedCB ConnHandler + + // DisconnectedErrCB sets the disconnected error handler that is called + // whenever the connection is disconnected. + // Disconnected error could be nil, for instance when user explicitly closes the connection. + // DisconnectedCB will not be called if DisconnectedErrCB is set + DisconnectedErrCB ConnErrHandler + + // ConnectedCB sets the connected handler called when the initial connection + // is established. It is not invoked on successful reconnects - for reconnections, + // use ReconnectedCB. ConnectedCB can be used in conjunction with RetryOnFailedConnect + // to detect whether the initial connect was successful. + ConnectedCB ConnHandler + + // ReconnectedCB sets the reconnected handler called whenever + // the connection is successfully reconnected. + ReconnectedCB ConnHandler + + // DiscoveredServersCB sets the callback that is invoked whenever a new + // server has joined the cluster. + DiscoveredServersCB ConnHandler + + // AsyncErrorCB sets the async error handler (e.g. slow consumer errors) + AsyncErrorCB ErrHandler + + // ReconnectBufSize is the size of the backing bufio during reconnect. + // Once this has been exhausted publish operations will return an error. + // Defaults to 8388608 bytes (8MB). + ReconnectBufSize int + + // SubChanLen is the size of the buffered channel used between the socket + // Go routine and the message delivery for SyncSubscriptions. + // NOTE: This does not affect AsyncSubscriptions which are + // dictated by PendingLimits() + // Defaults to 65536. + SubChanLen int + + // UserJWT sets the callback handler that will fetch a user's JWT. + UserJWT UserJWTHandler + + // Nkey sets the public nkey that will be used to authenticate + // when connecting to the server. UserJWT and Nkey are mutually exclusive + // and if defined, UserJWT will take precedence. 
+ Nkey string + + // SignatureCB designates the function used to sign the nonce + // presented from the server. + SignatureCB SignatureHandler + + // User sets the username to be used when connecting to the server. + User string + + // Password sets the password to be used when connecting to a server. + Password string + + // Token sets the token to be used when connecting to a server. + Token string + + // TokenHandler designates the function used to generate the token to be used when connecting to a server. + TokenHandler AuthTokenHandler + + // Dialer allows a custom net.Dialer when forming connections. + // DEPRECATED: should use CustomDialer instead. + Dialer *net.Dialer + + // CustomDialer allows to specify a custom dialer (not necessarily + // a *net.Dialer). + CustomDialer CustomDialer + + // UseOldRequestStyle forces the old method of Requests that utilize + // a new Inbox and a new Subscription for each request. + UseOldRequestStyle bool + + // NoCallbacksAfterClientClose allows preventing the invocation of + // callbacks after Close() is called. Client won't receive notifications + // when Close is invoked by user code. Default is to invoke the callbacks. + NoCallbacksAfterClientClose bool + + // LameDuckModeHandler sets the callback to invoke when the server notifies + // the connection that it entered lame duck mode, that is, going to + // gradually disconnect all its connections before shutting down. This is + // often used in deployments when upgrading NATS Servers. + LameDuckModeHandler ConnHandler + + // RetryOnFailedConnect sets the connection in reconnecting state right + // away if it can't connect to a server in the initial set. The + // MaxReconnect and ReconnectWait options are used for this process, + // similarly to when an established connection is disconnected. + // If a ReconnectHandler is set, it will be invoked on the first + // successful reconnect attempt (if the initial connect fails), + // and if a ClosedHandler is set, it will be invoked if + // it fails to connect (after exhausting the MaxReconnect attempts). + RetryOnFailedConnect bool + + // For websocket connections, indicates to the server that the connection + // supports compression. If the server does too, then data will be compressed. + Compression bool + + // For websocket connections, adds a path to connections url. + // This is useful when connecting to NATS behind a proxy. + ProxyPath string + + // InboxPrefix allows the default _INBOX prefix to be customized + InboxPrefix string + + // IgnoreAuthErrorAbort - if set to true, client opts out of the default connect behavior of aborting + // subsequent reconnect attempts if server returns the same auth error twice (regardless of reconnect policy). + IgnoreAuthErrorAbort bool + + // SkipHostLookup skips the DNS lookup for the server hostname. + SkipHostLookup bool +} + +const ( + // Scratch storage for assembling protocol headers + scratchSize = 512 + + // The size of the bufio reader/writer on top of the socket. + defaultBufSize = 32768 + + // The buffered size of the flush "kick" channel + flushChanSize = 1 + + // Default server pool size + srvPoolSize = 4 + + // NUID size + nuidSize = 22 + + // Default ports used if none is specified in given URL(s) + defaultWSPortString = "80" + defaultWSSPortString = "443" + defaultPortString = "4222" +) + +// A Conn represents a bare connection to a nats-server. +// It can send and receive []byte payloads. +// The connection is safe to use in multiple Go routines concurrently. 
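+//
+// A minimal connect-and-publish sketch:
+//
+//	nc, err := nats.Connect(nats.DefaultURL)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer nc.Close()
+//	nc.Publish("updates", []byte("hello"))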
+type Conn struct { + // Keep all members for which we use atomic at the beginning of the + // struct and make sure they are all 64bits (or use padding if necessary). + // atomic.* functions crash on 32bit machines if operand is not aligned + // at 64bit. See https://github.com/golang/go/issues/599 + Statistics + mu sync.RWMutex + // Opts holds the configuration of the Conn. + // Modifying the configuration of a running Conn is a race. + Opts Options + wg sync.WaitGroup + srvPool []*srv + current *srv + urls map[string]struct{} // Keep track of all known URLs (used by processInfo) + conn net.Conn + bw *natsWriter + br *natsReader + fch chan struct{} + info serverInfo + ssid int64 + subsMu sync.RWMutex + subs map[int64]*Subscription + ach *asyncCallbacksHandler + pongs []chan struct{} + scratch [scratchSize]byte + status Status + statListeners map[Status][]chan Status + initc bool // true if the connection is performing the initial connect + err error + ps *parseState + ptmr *time.Timer + pout int + ar bool // abort reconnect + rqch chan struct{} + ws bool // true if a websocket connection + + // New style response handler + respSub string // The wildcard subject + respSubPrefix string // the wildcard prefix including trailing . + respSubLen int // the length of the wildcard prefix excluding trailing . + respScanf string // The scanf template to extract mux token + respMux *Subscription // A single response subscription + respMap map[string]chan *Msg // Request map for the response msg channels + respRand *rand.Rand // Used for generating suffix + + // Msg filters for testing. + // Protected by subsMu + filters map[string]msgFilter +} + +type natsReader struct { + r io.Reader + buf []byte + off int + n int +} + +type natsWriter struct { + w io.Writer + bufs []byte + limit int + pending *bytes.Buffer + plimit int +} + +// Subscription represents interest in a given subject. +type Subscription struct { + mu sync.Mutex + sid int64 + + // Subject that represents this subscription. This can be different + // than the received subject inside a Msg if this is a wildcard. + Subject string + + // Optional queue group name. If present, all subscriptions with the + // same name will form a distributed queue, and each message will + // only be processed by one member of the group. + Queue string + + // For holding information about a JetStream consumer. + jsi *jsSub + + delivered uint64 + max uint64 + conn *Conn + mcb MsgHandler + mch chan *Msg + closed bool + sc bool + connClosed bool + + // Type of Subscription + typ SubscriptionType + + // Async linked list + pHead *Msg + pTail *Msg + pCond *sync.Cond + pDone func(subject string) + + // Pending stats, async subscriptions, high-speed etc. + pMsgs int + pBytes int + pMsgsMax int + pBytesMax int + pMsgsLimit int + pBytesLimit int + dropped int +} + +// Msg represents a message delivered by NATS. This structure is used +// by Subscribers and PublishMsg(). +// +// # Types of Acknowledgements +// +// In case using JetStream, there are multiple ways to ack a Msg: +// +// // Acknowledgement that a message has been processed. +// msg.Ack() +// +// // Negatively acknowledges a message. +// msg.Nak() +// +// // Terminate a message so that it is not redelivered further. +// msg.Term() +// +// // Signal the server that the message is being worked on and reset redelivery timer. 
+// msg.InProgress() +type Msg struct { + Subject string + Reply string + Header Header + Data []byte + Sub *Subscription + // Internal + next *Msg + wsz int + barrier *barrierInfo + ackd uint32 +} + +// Compares two msgs, ignores sub but checks all other public fields. +func (m *Msg) Equal(msg *Msg) bool { + if m == msg { + return true + } + if m == nil || msg == nil { + return false + } + if m.Subject != msg.Subject || m.Reply != msg.Reply { + return false + } + if !bytes.Equal(m.Data, msg.Data) { + return false + } + if len(m.Header) != len(msg.Header) { + return false + } + for k, v := range m.Header { + val, ok := msg.Header[k] + if !ok || len(v) != len(val) { + return false + } + for i, hdr := range v { + if hdr != val[i] { + return false + } + } + } + return true +} + +// Size returns a message size in bytes. +func (m *Msg) Size() int { + if m.wsz != 0 { + return m.wsz + } + hdr, _ := m.headerBytes() + return len(m.Subject) + len(m.Reply) + len(hdr) + len(m.Data) +} + +func (m *Msg) headerBytes() ([]byte, error) { + var hdr []byte + if len(m.Header) == 0 { + return hdr, nil + } + + var b bytes.Buffer + _, err := b.WriteString(hdrLine) + if err != nil { + return nil, ErrBadHeaderMsg + } + + err = http.Header(m.Header).Write(&b) + if err != nil { + return nil, ErrBadHeaderMsg + } + + _, err = b.WriteString(crlf) + if err != nil { + return nil, ErrBadHeaderMsg + } + + return b.Bytes(), nil +} + +type barrierInfo struct { + refs int64 + f func() +} + +// Tracks various stats received and sent on this connection, +// including counts for messages and bytes. +type Statistics struct { + InMsgs uint64 + OutMsgs uint64 + InBytes uint64 + OutBytes uint64 + Reconnects uint64 +} + +// Tracks individual backend servers. +type srv struct { + url *url.URL + didConnect bool + reconnects int + lastErr error + isImplicit bool + tlsName string +} + +// The INFO block received from the server. +type serverInfo struct { + ID string `json:"server_id"` + Name string `json:"server_name"` + Proto int `json:"proto"` + Version string `json:"version"` + Host string `json:"host"` + Port int `json:"port"` + Headers bool `json:"headers"` + AuthRequired bool `json:"auth_required,omitempty"` + TLSRequired bool `json:"tls_required,omitempty"` + TLSAvailable bool `json:"tls_available,omitempty"` + MaxPayload int64 `json:"max_payload"` + CID uint64 `json:"client_id,omitempty"` + ClientIP string `json:"client_ip,omitempty"` + Nonce string `json:"nonce,omitempty"` + Cluster string `json:"cluster,omitempty"` + ConnectURLs []string `json:"connect_urls,omitempty"` + LameDuckMode bool `json:"ldm,omitempty"` +} + +const ( + // clientProtoZero is the original client protocol from 2009. + // http://nats.io/documentation/internals/nats-protocol/ + /* clientProtoZero */ _ = iota + // clientProtoInfo signals a client can receive more then the original INFO block. + // This can be used to update clients on other cluster members, etc. 
+ clientProtoInfo +) + +type connectInfo struct { + Verbose bool `json:"verbose"` + Pedantic bool `json:"pedantic"` + UserJWT string `json:"jwt,omitempty"` + Nkey string `json:"nkey,omitempty"` + Signature string `json:"sig,omitempty"` + User string `json:"user,omitempty"` + Pass string `json:"pass,omitempty"` + Token string `json:"auth_token,omitempty"` + TLS bool `json:"tls_required"` + Name string `json:"name"` + Lang string `json:"lang"` + Version string `json:"version"` + Protocol int `json:"protocol"` + Echo bool `json:"echo"` + Headers bool `json:"headers"` + NoResponders bool `json:"no_responders"` +} + +// MsgHandler is a callback function that processes messages delivered to +// asynchronous subscribers. +type MsgHandler func(msg *Msg) + +// Connect will attempt to connect to the NATS system. +// The url can contain username/password semantics. e.g. nats://derek:pass@localhost:4222 +// Comma separated arrays are also supported, e.g. urlA, urlB. +// Options start with the defaults but can be overridden. +// To connect to a NATS Server's websocket port, use the `ws` or `wss` scheme, such as +// `ws://localhost:8080`. Note that websocket schemes cannot be mixed with others (nats/tls). +func Connect(url string, options ...Option) (*Conn, error) { + opts := GetDefaultOptions() + opts.Servers = processUrlString(url) + for _, opt := range options { + if opt != nil { + if err := opt(&opts); err != nil { + return nil, err + } + } + } + return opts.Connect() +} + +// Options that can be passed to Connect. + +// Name is an Option to set the client name. +func Name(name string) Option { + return func(o *Options) error { + o.Name = name + return nil + } +} + +// InProcessServer is an Option that will try to establish a direction to a NATS server +// running within the process instead of dialing via TCP. +func InProcessServer(server InProcessConnProvider) Option { + return func(o *Options) error { + o.InProcessServer = server + return nil + } +} + +// Secure is an Option to enable TLS secure connections that skip server verification by default. +// Pass a TLS Configuration for proper TLS. +// NOTE: This should NOT be used in a production setting. +func Secure(tls ...*tls.Config) Option { + return func(o *Options) error { + o.Secure = true + // Use of variadic just simplifies testing scenarios. We only take the first one. + if len(tls) > 1 { + return ErrMultipleTLSConfigs + } + if len(tls) == 1 { + o.TLSConfig = tls[0] + } + return nil + } +} + +// RootCAs is a helper option to provide the RootCAs pool from a list of filenames. +// If Secure is not already set this will set it as well. +func RootCAs(file ...string) Option { + return func(o *Options) error { + rootCAsCB := func() (*x509.CertPool, error) { + pool := x509.NewCertPool() + for _, f := range file { + rootPEM, err := os.ReadFile(f) + if err != nil || rootPEM == nil { + return nil, fmt.Errorf("nats: error loading or parsing rootCA file: %w", err) + } + ok := pool.AppendCertsFromPEM(rootPEM) + if !ok { + return nil, fmt.Errorf("nats: failed to parse root certificate from %q", f) + } + } + return pool, nil + } + if o.TLSConfig == nil { + o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + if _, err := rootCAsCB(); err != nil { + return err + } + o.RootCAsCB = rootCAsCB + o.Secure = true + return nil + } +} + +// ClientCert is a helper option to provide the client certificate from a file. +// If Secure is not already set this will set it as well. 
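+// For example (sketch; the file paths are placeholders):
+//
+//	nc, err := nats.Connect(nats.DefaultURL,
+//		nats.ClientCert("./client-cert.pem", "./client-key.pem"))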
+func ClientCert(certFile, keyFile string) Option { + return func(o *Options) error { + tlsCertCB := func() (tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return tls.Certificate{}, fmt.Errorf("nats: error loading client certificate: %w", err) + } + cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return tls.Certificate{}, fmt.Errorf("nats: error parsing client certificate: %w", err) + } + return cert, nil + } + if o.TLSConfig == nil { + o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + if _, err := tlsCertCB(); err != nil { + return err + } + o.TLSCertCB = tlsCertCB + o.Secure = true + return nil + } +} + +// NoReconnect is an Option to turn off reconnect behavior. +func NoReconnect() Option { + return func(o *Options) error { + o.AllowReconnect = false + return nil + } +} + +// DontRandomize is an Option to turn off randomizing the server pool. +func DontRandomize() Option { + return func(o *Options) error { + o.NoRandomize = true + return nil + } +} + +// NoEcho is an Option to turn off messages echoing back from a server. +// Note this is supported on servers >= version 1.2. Proto 1 or greater. +func NoEcho() Option { + return func(o *Options) error { + o.NoEcho = true + return nil + } +} + +// ReconnectWait is an Option to set the wait time between reconnect attempts. +// Defaults to 2s. +func ReconnectWait(t time.Duration) Option { + return func(o *Options) error { + o.ReconnectWait = t + return nil + } +} + +// MaxReconnects is an Option to set the maximum number of reconnect attempts. +// If negative, it will never stop trying to reconnect. +// Defaults to 60. +func MaxReconnects(max int) Option { + return func(o *Options) error { + o.MaxReconnect = max + return nil + } +} + +// ReconnectJitter is an Option to set the upper bound of a random delay added ReconnectWait. +// Defaults to 100ms and 1s, respectively. +func ReconnectJitter(jitter, jitterForTLS time.Duration) Option { + return func(o *Options) error { + o.ReconnectJitter = jitter + o.ReconnectJitterTLS = jitterForTLS + return nil + } +} + +// CustomReconnectDelay is an Option to set the CustomReconnectDelayCB option. +// See CustomReconnectDelayCB Option for more details. +func CustomReconnectDelay(cb ReconnectDelayHandler) Option { + return func(o *Options) error { + o.CustomReconnectDelayCB = cb + return nil + } +} + +// PingInterval is an Option to set the period for client ping commands. +// Defaults to 2m. +func PingInterval(t time.Duration) Option { + return func(o *Options) error { + o.PingInterval = t + return nil + } +} + +// MaxPingsOutstanding is an Option to set the maximum number of ping requests +// that can go unanswered by the server before closing the connection. +// Defaults to 2. +func MaxPingsOutstanding(max int) Option { + return func(o *Options) error { + o.MaxPingsOut = max + return nil + } +} + +// ReconnectBufSize sets the buffer size of messages kept while busy reconnecting. +// Defaults to 8388608 bytes (8MB). It can be disabled by setting it to -1. +func ReconnectBufSize(size int) Option { + return func(o *Options) error { + o.ReconnectBufSize = size + return nil + } +} + +// Timeout is an Option to set the timeout for Dial on a connection. +// Defaults to 2s. +func Timeout(t time.Duration) Option { + return func(o *Options) error { + o.Timeout = t + return nil + } +} + +// FlusherTimeout is an Option to set the write (and flush) timeout on a connection. 
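+// Defaults to 1m.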
+func FlusherTimeout(t time.Duration) Option { + return func(o *Options) error { + o.FlusherTimeout = t + return nil + } +} + +// DrainTimeout is an Option to set the timeout for draining a connection. +// Defaults to 30s. +func DrainTimeout(t time.Duration) Option { + return func(o *Options) error { + o.DrainTimeout = t + return nil + } +} + +// DisconnectErrHandler is an Option to set the disconnected error handler. +func DisconnectErrHandler(cb ConnErrHandler) Option { + return func(o *Options) error { + o.DisconnectedErrCB = cb + return nil + } +} + +// DisconnectHandler is an Option to set the disconnected handler. +// DEPRECATED: Use DisconnectErrHandler. +func DisconnectHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.DisconnectedCB = cb + return nil + } +} + +// ConnectHandler is an Option to set the connected handler. +func ConnectHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.ConnectedCB = cb + return nil + } +} + +// ReconnectHandler is an Option to set the reconnected handler. +func ReconnectHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.ReconnectedCB = cb + return nil + } +} + +// ClosedHandler is an Option to set the closed handler. +func ClosedHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.ClosedCB = cb + return nil + } +} + +// DiscoveredServersHandler is an Option to set the new servers handler. +func DiscoveredServersHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.DiscoveredServersCB = cb + return nil + } +} + +// ErrorHandler is an Option to set the async error handler. +func ErrorHandler(cb ErrHandler) Option { + return func(o *Options) error { + o.AsyncErrorCB = cb + return nil + } +} + +// UserInfo is an Option to set the username and password to +// use when not included directly in the URLs. +func UserInfo(user, password string) Option { + return func(o *Options) error { + o.User = user + o.Password = password + return nil + } +} + +// Token is an Option to set the token to use +// when a token is not included directly in the URLs +// and when a token handler is not provided. +func Token(token string) Option { + return func(o *Options) error { + if o.TokenHandler != nil { + return ErrTokenAlreadySet + } + o.Token = token + return nil + } +} + +// TokenHandler is an Option to set the token handler to use +// when a token is not included directly in the URLs +// and when a token is not set. +func TokenHandler(cb AuthTokenHandler) Option { + return func(o *Options) error { + if o.Token != "" { + return ErrTokenAlreadySet + } + o.TokenHandler = cb + return nil + } +} + +// UserCredentials is a convenience function that takes a filename +// for a user's JWT and a filename for the user's private Nkey seed. +func UserCredentials(userOrChainedFile string, seedFiles ...string) Option { + userCB := func() (string, error) { + return userFromFile(userOrChainedFile) + } + var keyFile string + if len(seedFiles) > 0 { + keyFile = seedFiles[0] + } else { + keyFile = userOrChainedFile + } + sigCB := func(nonce []byte) ([]byte, error) { + return sigHandler(nonce, keyFile) + } + return UserJWT(userCB, sigCB) +} + +// UserJWTAndSeed is a convenience function that takes the JWT and seed +// values as strings. 
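+// For example (sketch; jwt and seed hold placeholder credential strings):
+//
+//	nc, err := nats.Connect(url, nats.UserJWTAndSeed(jwt, seed))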
+func UserJWTAndSeed(jwt string, seed string) Option { + userCB := func() (string, error) { + return jwt, nil + } + + sigCB := func(nonce []byte) ([]byte, error) { + kp, err := nkeys.FromSeed([]byte(seed)) + if err != nil { + return nil, fmt.Errorf("unable to extract key pair from seed: %w", err) + } + // Wipe our key on exit. + defer kp.Wipe() + + sig, _ := kp.Sign(nonce) + return sig, nil + } + + return UserJWT(userCB, sigCB) +} + +// UserJWT will set the callbacks to retrieve the user's JWT and +// the signature callback to sign the server nonce. This an the Nkey +// option are mutually exclusive. +func UserJWT(userCB UserJWTHandler, sigCB SignatureHandler) Option { + return func(o *Options) error { + if userCB == nil { + return ErrNoUserCB + } + if sigCB == nil { + return ErrUserButNoSigCB + } + // Smoke test the user callback to ensure it is setup properly + // when processing options. + if _, err := userCB(); err != nil { + return err + } + + o.UserJWT = userCB + o.SignatureCB = sigCB + return nil + } +} + +// Nkey will set the public Nkey and the signature callback to +// sign the server nonce. +func Nkey(pubKey string, sigCB SignatureHandler) Option { + return func(o *Options) error { + o.Nkey = pubKey + o.SignatureCB = sigCB + if pubKey != "" && sigCB == nil { + return ErrNkeyButNoSigCB + } + return nil + } +} + +// SyncQueueLen will set the maximum queue len for the internal +// channel used for SubscribeSync(). +// Defaults to 65536. +func SyncQueueLen(max int) Option { + return func(o *Options) error { + o.SubChanLen = max + return nil + } +} + +// Dialer is an Option to set the dialer which will be used when +// attempting to establish a connection. +// DEPRECATED: Should use CustomDialer instead. +func Dialer(dialer *net.Dialer) Option { + return func(o *Options) error { + o.Dialer = dialer + return nil + } +} + +// SetCustomDialer is an Option to set a custom dialer which will be +// used when attempting to establish a connection. If both Dialer +// and CustomDialer are specified, CustomDialer takes precedence. +func SetCustomDialer(dialer CustomDialer) Option { + return func(o *Options) error { + o.CustomDialer = dialer + return nil + } +} + +// UseOldRequestStyle is an Option to force usage of the old Request style. +func UseOldRequestStyle() Option { + return func(o *Options) error { + o.UseOldRequestStyle = true + return nil + } +} + +// NoCallbacksAfterClientClose is an Option to disable callbacks when user code +// calls Close(). If close is initiated by any other condition, callbacks +// if any will be invoked. +func NoCallbacksAfterClientClose() Option { + return func(o *Options) error { + o.NoCallbacksAfterClientClose = true + return nil + } +} + +// LameDuckModeHandler sets the callback to invoke when the server notifies +// the connection that it entered lame duck mode, that is, going to +// gradually disconnect all its connections before shutting down. This is +// often used in deployments when upgrading NATS Servers. +func LameDuckModeHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.LameDuckModeHandler = cb + return nil + } +} + +// RetryOnFailedConnect sets the connection in reconnecting state right away +// if it can't connect to a server in the initial set. +// See RetryOnFailedConnect option for more details. +func RetryOnFailedConnect(retry bool) Option { + return func(o *Options) error { + o.RetryOnFailedConnect = retry + return nil + } +} + +// Compression is an Option to indicate if this connection supports +// compression. 
+func Compression(enabled bool) Option {
+	return func(o *Options) error {
+		o.Compression = enabled
+		return nil
+	}
+}
+
+// ProxyPath is an option for websocket connections that adds a path to the
+// connection URL. This is useful when connecting to NATS behind a proxy.
+func ProxyPath(path string) Option {
+	return func(o *Options) error {
+		o.ProxyPath = path
+		return nil
+	}
+}
+
+// CustomInboxPrefix configures the request + reply inbox prefix.
+func CustomInboxPrefix(p string) Option {
+	return func(o *Options) error {
+		if p == "" || strings.Contains(p, ">") || strings.Contains(p, "*") || strings.HasSuffix(p, ".") {
+			return fmt.Errorf("nats: invalid custom prefix")
+		}
+		o.InboxPrefix = p
+		return nil
+	}
+}
+
+// IgnoreAuthErrorAbort opts out of the default connect behavior of aborting
+// subsequent reconnect attempts if the server returns the same auth error twice.
+func IgnoreAuthErrorAbort() Option {
+	return func(o *Options) error {
+		o.IgnoreAuthErrorAbort = true
+		return nil
+	}
+}
+
+// SkipHostLookup is an Option to skip the host lookup when connecting to a server.
+func SkipHostLookup() Option {
+	return func(o *Options) error {
+		o.SkipHostLookup = true
+		return nil
+	}
+}
+
+// TLSHandshakeFirst is an Option to perform the TLS handshake first, that is,
+// before receiving the INFO protocol. This requires the server to also be
+// configured with that option; otherwise the connection will fail.
+func TLSHandshakeFirst() Option {
+	return func(o *Options) error {
+		o.TLSHandshakeFirst = true
+		o.Secure = true
+		return nil
+	}
+}
+
+// Handler processing
+
+// SetDisconnectHandler will set the disconnect event handler.
+// DEPRECATED: Use SetDisconnectErrHandler
+func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) {
+	if nc == nil {
+		return
+	}
+	nc.mu.Lock()
+	defer nc.mu.Unlock()
+	nc.Opts.DisconnectedCB = dcb
+}
+
+// SetDisconnectErrHandler will set the disconnect event handler.
+func (nc *Conn) SetDisconnectErrHandler(dcb ConnErrHandler) {
+	if nc == nil {
+		return
+	}
+	nc.mu.Lock()
+	defer nc.mu.Unlock()
+	nc.Opts.DisconnectedErrCB = dcb
+}
+
+// DisconnectErrHandler will return the disconnect event handler.
+func (nc *Conn) DisconnectErrHandler() ConnErrHandler {
+	if nc == nil {
+		return nil
+	}
+	nc.mu.Lock()
+	defer nc.mu.Unlock()
+	return nc.Opts.DisconnectedErrCB
+}
+
+// SetReconnectHandler will set the reconnect event handler.
+func (nc *Conn) SetReconnectHandler(rcb ConnHandler) {
+	if nc == nil {
+		return
+	}
+	nc.mu.Lock()
+	defer nc.mu.Unlock()
+	nc.Opts.ReconnectedCB = rcb
+}
+
+// ReconnectHandler will return the reconnect event handler.
+func (nc *Conn) ReconnectHandler() ConnHandler {
+	if nc == nil {
+		return nil
+	}
+	nc.mu.Lock()
+	defer nc.mu.Unlock()
+	return nc.Opts.ReconnectedCB
+}
+
+// SetDiscoveredServersHandler will set the discovered servers handler.
+func (nc *Conn) SetDiscoveredServersHandler(dscb ConnHandler) {
+	if nc == nil {
+		return
+	}
+	nc.mu.Lock()
+	defer nc.mu.Unlock()
+	nc.Opts.DiscoveredServersCB = dscb
+}
+
+// DiscoveredServersHandler will return the discovered servers handler.
+func (nc *Conn) DiscoveredServersHandler() ConnHandler {
+	if nc == nil {
+		return nil
+	}
+	nc.mu.Lock()
+	defer nc.mu.Unlock()
+	return nc.Opts.DiscoveredServersCB
+}
+
+// SetClosedHandler will set the closed event handler.
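+//
+// Illustrative usage (an editor's sketch):
+//
+//	nc.SetClosedHandler(func(_ *nats.Conn) {
+//		log.Println("NATS connection closed")
+//	})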
+func (nc *Conn) SetClosedHandler(cb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.ClosedCB = cb +} + +// ClosedHandler will return the closed event handler. +func (nc *Conn) ClosedHandler() ConnHandler { + if nc == nil { + return nil + } + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.Opts.ClosedCB +} + +// SetErrorHandler will set the async error handler. +func (nc *Conn) SetErrorHandler(cb ErrHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.AsyncErrorCB = cb +} + +// ErrorHandler will return the async error handler. +func (nc *Conn) ErrorHandler() ErrHandler { + if nc == nil { + return nil + } + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.Opts.AsyncErrorCB +} + +// Process the url string argument to Connect. +// Return an array of urls, even if only one. +func processUrlString(url string) []string { + urls := strings.Split(url, ",") + var j int + for _, s := range urls { + u := strings.TrimSpace(s) + if len(u) > 0 { + urls[j] = u + j++ + } + } + return urls[:j] +} + +// Connect will attempt to connect to a NATS server with multiple options. +func (o Options) Connect() (*Conn, error) { + nc := &Conn{Opts: o} + + // Some default options processing. + if nc.Opts.MaxPingsOut == 0 { + nc.Opts.MaxPingsOut = DefaultMaxPingOut + } + // Allow old default for channel length to work correctly. + if nc.Opts.SubChanLen == 0 { + nc.Opts.SubChanLen = DefaultMaxChanLen + } + // Default ReconnectBufSize + if nc.Opts.ReconnectBufSize == 0 { + nc.Opts.ReconnectBufSize = DefaultReconnectBufSize + } + // Ensure that Timeout is not 0 + if nc.Opts.Timeout == 0 { + nc.Opts.Timeout = DefaultTimeout + } + + // Check first for user jwt callback being defined and nkey. + if nc.Opts.UserJWT != nil && nc.Opts.Nkey != "" { + return nil, ErrNkeyAndUser + } + + // Check if we have an nkey but no signature callback defined. + if nc.Opts.Nkey != "" && nc.Opts.SignatureCB == nil { + return nil, ErrNkeyButNoSigCB + } + + // Allow custom Dialer for connecting using a timeout by default + if nc.Opts.Dialer == nil { + nc.Opts.Dialer = &net.Dialer{ + Timeout: nc.Opts.Timeout, + } + } + + // If the TLSHandshakeFirst option is specified, make sure that + // the Secure boolean is true. + if nc.Opts.TLSHandshakeFirst { + nc.Opts.Secure = true + } + + if err := nc.setupServerPool(); err != nil { + return nil, err + } + + // Create the async callback handler. + nc.ach = &asyncCallbacksHandler{} + nc.ach.cond = sync.NewCond(&nc.ach.mu) + + // Set a default error handler that will print to stderr. 
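+	// (defaultErrHandler, defined below, writes the error, the connection's
+	// CID and, when available, the subscription's subject to os.Stderr.)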
+ if nc.Opts.AsyncErrorCB == nil { + nc.Opts.AsyncErrorCB = defaultErrHandler + } + + // Create reader/writer + nc.newReaderWriter() + + connectionEstablished, err := nc.connect() + if err != nil { + return nil, err + } + + // Spin up the async cb dispatcher on success + go nc.ach.asyncCBDispatcher() + + if connectionEstablished && nc.Opts.ConnectedCB != nil { + nc.ach.push(func() { nc.Opts.ConnectedCB(nc) }) + } + + return nc, nil +} + +func defaultErrHandler(nc *Conn, sub *Subscription, err error) { + var cid uint64 + if nc != nil { + nc.mu.RLock() + cid = nc.info.CID + nc.mu.RUnlock() + } + var errStr string + if sub != nil { + var subject string + sub.mu.Lock() + if sub.jsi != nil { + subject = sub.jsi.psubj + } else { + subject = sub.Subject + } + sub.mu.Unlock() + errStr = fmt.Sprintf("%s on connection [%d] for subscription on %q\n", err.Error(), cid, subject) + } else { + errStr = fmt.Sprintf("%s on connection [%d]\n", err.Error(), cid) + } + os.Stderr.WriteString(errStr) +} + +const ( + _CRLF_ = "\r\n" + _EMPTY_ = "" + _SPC_ = " " + _PUB_P_ = "PUB " + _HPUB_P_ = "HPUB " +) + +var _CRLF_BYTES_ = []byte(_CRLF_) + +const ( + _OK_OP_ = "+OK" + _ERR_OP_ = "-ERR" + _PONG_OP_ = "PONG" + _INFO_OP_ = "INFO" +) + +const ( + connectProto = "CONNECT %s" + _CRLF_ + pingProto = "PING" + _CRLF_ + pongProto = "PONG" + _CRLF_ + subProto = "SUB %s %s %d" + _CRLF_ + unsubProto = "UNSUB %d %s" + _CRLF_ + okProto = _OK_OP_ + _CRLF_ +) + +// Return the currently selected server +func (nc *Conn) currentServer() (int, *srv) { + for i, s := range nc.srvPool { + if s == nil { + continue + } + if s == nc.current { + return i, s + } + } + return -1, nil +} + +// Pop the current server and put onto the end of the list. Select head of list as long +// as number of reconnect attempts under MaxReconnect. +func (nc *Conn) selectNextServer() (*srv, error) { + i, s := nc.currentServer() + if i < 0 { + return nil, ErrNoServers + } + sp := nc.srvPool + num := len(sp) + copy(sp[i:num-1], sp[i+1:num]) + maxReconnect := nc.Opts.MaxReconnect + if maxReconnect < 0 || s.reconnects < maxReconnect { + nc.srvPool[num-1] = s + } else { + nc.srvPool = sp[0 : num-1] + } + if len(nc.srvPool) <= 0 { + nc.current = nil + return nil, ErrNoServers + } + nc.current = nc.srvPool[0] + return nc.srvPool[0], nil +} + +// Will assign the correct server to nc.current +func (nc *Conn) pickServer() error { + nc.current = nil + if len(nc.srvPool) <= 0 { + return ErrNoServers + } + + for _, s := range nc.srvPool { + if s != nil { + nc.current = s + return nil + } + } + return ErrNoServers +} + +const tlsScheme = "tls" + +// Create the server pool using the options given. +// We will place a Url option first, followed by any +// Server Options. We will randomize the server pool unless +// the NoRandomize flag is set. +func (nc *Conn) setupServerPool() error { + nc.srvPool = make([]*srv, 0, srvPoolSize) + nc.urls = make(map[string]struct{}, srvPoolSize) + + // Create srv objects from each url string in nc.Opts.Servers + // and add them to the pool. + for _, urlString := range nc.Opts.Servers { + if err := nc.addURLToPool(urlString, false, false); err != nil { + return err + } + } + + // Randomize if allowed to + if !nc.Opts.NoRandomize { + nc.shufflePool(0) + } + + // Normally, if this one is set, Options.Servers should not be, + // but we always allowed that, so continue to do so. 
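+	// (When both are set, Options.Url is appended to the pool and then
+	// swapped to the front so that it is always the first server tried.)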
+	if nc.Opts.Url != _EMPTY_ {
+		// Add to the end of the array
+		if err := nc.addURLToPool(nc.Opts.Url, false, false); err != nil {
+			return err
+		}
+		// Then swap it with first to guarantee that Options.Url is tried first.
+		last := len(nc.srvPool) - 1
+		if last > 0 {
+			nc.srvPool[0], nc.srvPool[last] = nc.srvPool[last], nc.srvPool[0]
+		}
+	} else if len(nc.srvPool) <= 0 {
+		// Place default URL if pool is empty.
+		if err := nc.addURLToPool(DefaultURL, false, false); err != nil {
+			return err
+		}
+	}
+
+	// Check for Scheme hint to move to TLS mode.
+	for _, srv := range nc.srvPool {
+		if srv.url.Scheme == tlsScheme || srv.url.Scheme == wsSchemeTLS {
+			// FIXME(dlc), this is for all in the pool, should be case by case.
+			nc.Opts.Secure = true
+			if nc.Opts.TLSConfig == nil {
+				nc.Opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
+			}
+		}
+	}
+
+	return nc.pickServer()
+}
+
+// Helper function to return the connection scheme.
+func (nc *Conn) connScheme() string {
+	if nc.ws {
+		if nc.Opts.Secure {
+			return wsSchemeTLS
+		}
+		return wsScheme
+	}
+	if nc.Opts.Secure {
+		return tlsScheme
+	}
+	return "nats"
+}
+
+// Return true iff u.Hostname() is an IP address.
+func hostIsIP(u *url.URL) bool {
+	return net.ParseIP(u.Hostname()) != nil
+}
+
+// addURLToPool adds an entry to the server pool.
+func (nc *Conn) addURLToPool(sURL string, implicit, saveTLSName bool) error {
+	if !strings.Contains(sURL, "://") {
+		sURL = fmt.Sprintf("%s://%s", nc.connScheme(), sURL)
+	}
+	var (
+		u   *url.URL
+		err error
+	)
+	for i := 0; i < 2; i++ {
+		u, err = url.Parse(sURL)
+		if err != nil {
+			return err
+		}
+		if u.Port() != "" {
+			break
+		}
+		// In case the given URL is of the form "localhost:", just add
+		// the port number at the end, otherwise, add ":4222".
+		if sURL[len(sURL)-1] != ':' {
+			sURL += ":"
+		}
+		switch u.Scheme {
+		case wsScheme:
+			sURL += defaultWSPortString
+		case wsSchemeTLS:
+			sURL += defaultWSSPortString
+		default:
+			sURL += defaultPortString
+		}
+	}
+
+	isWS := isWebsocketScheme(u)
+	// We don't support mix and match of websocket and non websocket URLs.
+	// If this is the first URL, then we accept and switch the global state
+	// to websocket. After that, we will know how to reject mixed URLs.
+	if len(nc.srvPool) == 0 {
+		nc.ws = isWS
+	} else if isWS && !nc.ws || !isWS && nc.ws {
+		return fmt.Errorf("mixing of websocket and non websocket URLs is not allowed")
+	}
+
+	var tlsName string
+	if implicit {
+		curl := nc.current.url
+		// Check to see if we do not have a url.User but the currently
+		// connected url does. If so, copy it over.
+		if u.User == nil && curl.User != nil {
+			u.User = curl.User
+		}
+		// We are checking to see if we have a secure connection and are
+		// adding an implicit server that just has an IP. If so we will remember
+		// the current hostname we are connected to.
+		if saveTLSName && hostIsIP(u) {
+			tlsName = curl.Hostname()
+		}
+	}
+
+	s := &srv{url: u, isImplicit: implicit, tlsName: tlsName}
+	nc.srvPool = append(nc.srvPool, s)
+	nc.urls[u.Host] = struct{}{}
+	return nil
+}
+
+// shufflePool randomly swaps elements in the server pool.
+// The `offset` value indicates that the shuffling should start at
+// this offset and leave the elements from [0..offset) intact.
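+// This is a Fisher-Yates style shuffle restricted to srvPool[offset:],
+// seeded from the current time.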
+func (nc *Conn) shufflePool(offset int) { + if len(nc.srvPool) <= offset+1 { + return + } + source := rand.NewSource(time.Now().UnixNano()) + r := rand.New(source) + for i := offset; i < len(nc.srvPool); i++ { + j := offset + r.Intn(i+1-offset) + nc.srvPool[i], nc.srvPool[j] = nc.srvPool[j], nc.srvPool[i] + } +} + +func (nc *Conn) newReaderWriter() { + nc.br = &natsReader{ + buf: make([]byte, defaultBufSize), + off: -1, + } + nc.bw = &natsWriter{ + limit: defaultBufSize, + plimit: nc.Opts.ReconnectBufSize, + } +} + +func (nc *Conn) bindToNewConn() { + bw := nc.bw + bw.w, bw.bufs = nc.newWriter(), nil + br := nc.br + br.r, br.n, br.off = nc.conn, 0, -1 +} + +func (nc *Conn) newWriter() io.Writer { + var w io.Writer = nc.conn + if nc.Opts.FlusherTimeout > 0 { + w = &timeoutWriter{conn: nc.conn, timeout: nc.Opts.FlusherTimeout} + } + return w +} + +func (w *natsWriter) appendString(str string) error { + return w.appendBufs([]byte(str)) +} + +func (w *natsWriter) appendBufs(bufs ...[]byte) error { + for _, buf := range bufs { + if len(buf) == 0 { + continue + } + if w.pending != nil { + w.pending.Write(buf) + } else { + w.bufs = append(w.bufs, buf...) + } + } + if w.pending == nil && len(w.bufs) >= w.limit { + return w.flush() + } + return nil +} + +func (w *natsWriter) writeDirect(strs ...string) error { + for _, str := range strs { + if _, err := w.w.Write([]byte(str)); err != nil { + return err + } + } + return nil +} + +func (w *natsWriter) flush() error { + // If a pending buffer is set, we don't flush. Code that needs to + // write directly to the socket, by-passing buffers during (re)connect, + // will use the writeDirect() API. + if w.pending != nil { + return nil + } + // Do not skip calling w.w.Write() here if len(w.bufs) is 0 because + // the actual writer (if websocket for instance) may have things + // to do such as sending control frames, etc.. + _, err := w.w.Write(w.bufs) + w.bufs = w.bufs[:0] + return err +} + +func (w *natsWriter) buffered() int { + if w.pending != nil { + return w.pending.Len() + } + return len(w.bufs) +} + +func (w *natsWriter) switchToPending() { + w.pending = new(bytes.Buffer) +} + +func (w *natsWriter) flushPendingBuffer() error { + if w.pending == nil || w.pending.Len() == 0 { + return nil + } + _, err := w.w.Write(w.pending.Bytes()) + // Reset the pending buffer at this point because we don't want + // to take the risk of sending duplicates or partials. + w.pending.Reset() + return err +} + +func (w *natsWriter) atLimitIfUsingPending() bool { + if w.pending == nil { + return false + } + return w.pending.Len() >= w.plimit +} + +func (w *natsWriter) doneWithPending() { + w.pending = nil +} + +// Notify the reader that we are done with the connect, where "read" operations +// happen synchronously and under the connection lock. After this point, "read" +// will be happening from the read loop, without the connection lock. +// +// Note: this runs under the connection lock. 
+func (r *natsReader) doneWithConnect() { + if wsr, ok := r.r.(*websocketReader); ok { + wsr.doneWithConnect() + } +} + +func (r *natsReader) Read() ([]byte, error) { + if r.off >= 0 { + off := r.off + r.off = -1 + return r.buf[off:r.n], nil + } + var err error + r.n, err = r.r.Read(r.buf) + return r.buf[:r.n], err +} + +func (r *natsReader) ReadString(delim byte) (string, error) { + var s string +build_string: + // First look if we have something in the buffer + if r.off >= 0 { + i := bytes.IndexByte(r.buf[r.off:r.n], delim) + if i >= 0 { + end := r.off + i + 1 + s += string(r.buf[r.off:end]) + r.off = end + if r.off >= r.n { + r.off = -1 + } + return s, nil + } + // We did not find the delim, so will have to read more. + s += string(r.buf[r.off:r.n]) + r.off = -1 + } + if _, err := r.Read(); err != nil { + return s, err + } + r.off = 0 + goto build_string +} + +// createConn will connect to the server and wrap the appropriate +// bufio structures. It will do the right thing when an existing +// connection is in place. +func (nc *Conn) createConn() (err error) { + if nc.Opts.Timeout < 0 { + return ErrBadTimeout + } + if _, cur := nc.currentServer(); cur == nil { + return ErrNoServers + } + + // If we have a reference to an in-process server then establish a + // connection using that. + if nc.Opts.InProcessServer != nil { + conn, err := nc.Opts.InProcessServer.InProcessConn() + if err != nil { + return fmt.Errorf("failed to get in-process connection: %w", err) + } + nc.conn = conn + nc.bindToNewConn() + return nil + } + + // We will auto-expand host names if they resolve to multiple IPs + hosts := []string{} + u := nc.current.url + + if !nc.Opts.SkipHostLookup && net.ParseIP(u.Hostname()) == nil { + addrs, _ := net.LookupHost(u.Hostname()) + for _, addr := range addrs { + hosts = append(hosts, net.JoinHostPort(addr, u.Port())) + } + } + // Fall back to what we were given. + if len(hosts) == 0 { + hosts = append(hosts, u.Host) + } + + // CustomDialer takes precedence. If not set, use Opts.Dialer which + // is set to a default *net.Dialer (in Connect()) if not explicitly + // set by the user. + dialer := nc.Opts.CustomDialer + if dialer == nil { + // We will copy and shorten the timeout if we have multiple hosts to try. + copyDialer := *nc.Opts.Dialer + copyDialer.Timeout = copyDialer.Timeout / time.Duration(len(hosts)) + dialer = ©Dialer + } + + if len(hosts) > 1 && !nc.Opts.NoRandomize { + rand.Shuffle(len(hosts), func(i, j int) { + hosts[i], hosts[j] = hosts[j], hosts[i] + }) + } + for _, host := range hosts { + nc.conn, err = dialer.Dial("tcp", host) + if err == nil { + break + } + } + if err != nil { + return err + } + + // If scheme starts with "ws" then branch out to websocket code. + if isWebsocketScheme(u) { + return nc.wsInitHandshake(u) + } + + // Reset reader/writer to this new TCP connection + nc.bindToNewConn() + return nil +} + +type skipTLSDialer interface { + SkipTLSHandshake() bool +} + +// makeTLSConn will wrap an existing Conn using TLS +func (nc *Conn) makeTLSConn() error { + if nc.Opts.CustomDialer != nil { + // we do nothing when asked to skip the TLS wrapper + sd, ok := nc.Opts.CustomDialer.(skipTLSDialer) + if ok && sd.SkipTLSHandshake() { + return nil + } + } + // Allow the user to configure their own tls.Config structure. 
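+	// (The config is cloned below so that the user's tls.Config is never
+	// mutated by the certificate and RootCAs callbacks.)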
+	tlsCopy := &tls.Config{}
+	if nc.Opts.TLSConfig != nil {
+		tlsCopy = util.CloneTLSConfig(nc.Opts.TLSConfig)
+	}
+	if nc.Opts.TLSCertCB != nil {
+		cert, err := nc.Opts.TLSCertCB()
+		if err != nil {
+			return err
+		}
+		tlsCopy.Certificates = []tls.Certificate{cert}
+	}
+	if nc.Opts.RootCAsCB != nil {
+		rootCAs, err := nc.Opts.RootCAsCB()
+		if err != nil {
+			return err
+		}
+		tlsCopy.RootCAs = rootCAs
+	}
+	// If it's blank we will override it with the current host
+	if tlsCopy.ServerName == _EMPTY_ {
+		if nc.current.tlsName != _EMPTY_ {
+			tlsCopy.ServerName = nc.current.tlsName
+		} else {
+			h, _, _ := net.SplitHostPort(nc.current.url.Host)
+			tlsCopy.ServerName = h
+		}
+	}
+	nc.conn = tls.Client(nc.conn, tlsCopy)
+	conn := nc.conn.(*tls.Conn)
+	if err := conn.Handshake(); err != nil {
+		return err
+	}
+	nc.bindToNewConn()
+	return nil
+}
+
+// TLSConnectionState retrieves the state of the TLS connection to the server
+func (nc *Conn) TLSConnectionState() (tls.ConnectionState, error) {
+	if !nc.isConnected() {
+		return tls.ConnectionState{}, ErrDisconnected
+	}
+
+	nc.mu.RLock()
+	conn := nc.conn
+	nc.mu.RUnlock()
+
+	tc, ok := conn.(*tls.Conn)
+	if !ok {
+		return tls.ConnectionState{}, ErrConnectionNotTLS
+	}
+
+	return tc.ConnectionState(), nil
+}
+
+// waitForExits will wait for all socket watcher Go routines to
+// be shut down before proceeding.
+func (nc *Conn) waitForExits() {
+	// Kick old flusher forcefully.
+	select {
+	case nc.fch <- struct{}{}:
+	default:
+	}
+
+	// Wait for any previous go routines.
+	nc.wg.Wait()
+}
+
+// ConnectedUrl reports the connected server's URL
+func (nc *Conn) ConnectedUrl() string {
+	if nc == nil {
+		return _EMPTY_
+	}
+
+	nc.mu.RLock()
+	defer nc.mu.RUnlock()
+
+	if nc.status != CONNECTED {
+		return _EMPTY_
+	}
+	return nc.current.url.String()
+}
+
+// ConnectedUrlRedacted reports the connected server's URL with passwords redacted
+func (nc *Conn) ConnectedUrlRedacted() string {
+	if nc == nil {
+		return _EMPTY_
+	}
+
+	nc.mu.RLock()
+	defer nc.mu.RUnlock()
+
+	if nc.status != CONNECTED {
+		return _EMPTY_
+	}
+	return nc.current.url.Redacted()
+}
+
+// ConnectedAddr returns the connected server's IP
+func (nc *Conn) ConnectedAddr() string {
+	if nc == nil {
+		return _EMPTY_
+	}
+
+	nc.mu.RLock()
+	defer nc.mu.RUnlock()
+
+	if nc.status != CONNECTED {
+		return _EMPTY_
+	}
+	return nc.conn.RemoteAddr().String()
+}
+
+// ConnectedServerId reports the connected server's Id
+func (nc *Conn) ConnectedServerId() string {
+	if nc == nil {
+		return _EMPTY_
+	}
+
+	nc.mu.RLock()
+	defer nc.mu.RUnlock()
+
+	if nc.status != CONNECTED {
+		return _EMPTY_
+	}
+	return nc.info.ID
+}
+
+// ConnectedServerName reports the connected server's name
+func (nc *Conn) ConnectedServerName() string {
+	if nc == nil {
+		return _EMPTY_
+	}
+
+	nc.mu.RLock()
+	defer nc.mu.RUnlock()
+
+	if nc.status != CONNECTED {
+		return _EMPTY_
+	}
+	return nc.info.Name
+}
+
+var semVerRe = regexp.MustCompile(`\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?`)
+
+func versionComponents(version string) (major, minor, patch int, err error) {
+	m := semVerRe.FindStringSubmatch(version)
+	if m == nil {
+		return 0, 0, 0, errors.New("invalid semver")
+	}
+	major, err = strconv.Atoi(m[1])
+	if err != nil {
+		return -1, -1, -1, err
+	}
+	minor, err = strconv.Atoi(m[2])
+	if err != nil {
+		return -1, -1, -1, err
+	}
+	patch, err = strconv.Atoi(m[3])
+	if err != nil {
+		return -1, -1, -1, err
+	}
+	return major, minor, patch, err
+}
+
+// Check for minimum server requirement.
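+// serverMinVersion returns true when the connected server reports a version
+// of at least major.minor.patch.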
+func (nc *Conn) serverMinVersion(major, minor, patch int) bool { + smajor, sminor, spatch, _ := versionComponents(nc.ConnectedServerVersion()) + if smajor < major || (smajor == major && sminor < minor) || (smajor == major && sminor == minor && spatch < patch) { + return false + } + return true +} + +// ConnectedServerVersion reports the connected server's version as a string +func (nc *Conn) ConnectedServerVersion() string { + if nc == nil { + return _EMPTY_ + } + + nc.mu.RLock() + defer nc.mu.RUnlock() + + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.info.Version +} + +// ConnectedClusterName reports the connected server's cluster name if any +func (nc *Conn) ConnectedClusterName() string { + if nc == nil { + return _EMPTY_ + } + + nc.mu.RLock() + defer nc.mu.RUnlock() + + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.info.Cluster +} + +// Low level setup for structs, etc +func (nc *Conn) setup() { + nc.subs = make(map[int64]*Subscription) + nc.pongs = make([]chan struct{}, 0, 8) + + nc.fch = make(chan struct{}, flushChanSize) + nc.rqch = make(chan struct{}) + + // Setup scratch outbound buffer for PUB/HPUB + pub := nc.scratch[:len(_HPUB_P_)] + copy(pub, _HPUB_P_) +} + +// Process a connected connection and initialize properly. +func (nc *Conn) processConnectInit() error { + + // Set our deadline for the whole connect process + nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout)) + defer nc.conn.SetDeadline(time.Time{}) + + // Set our status to connecting. + nc.changeConnStatus(CONNECTING) + + // If we need to have a TLS connection and want the TLS handshake to occur + // first, do it now. + if nc.Opts.Secure && nc.Opts.TLSHandshakeFirst { + if err := nc.makeTLSConn(); err != nil { + return err + } + } + + // Process the INFO protocol received from the server + err := nc.processExpectedInfo() + if err != nil { + return err + } + + // Send the CONNECT protocol along with the initial PING protocol. + // Wait for the PONG response (or any error that we get from the server). + err = nc.sendConnect() + if err != nil { + return err + } + + // Reset the number of PING sent out + nc.pout = 0 + + // Start or reset Timer + if nc.Opts.PingInterval > 0 { + if nc.ptmr == nil { + nc.ptmr = time.AfterFunc(nc.Opts.PingInterval, nc.processPingTimer) + } else { + nc.ptmr.Reset(nc.Opts.PingInterval) + } + } + + // Start the readLoop and flusher go routines, we will wait on both on a reconnect event. + nc.wg.Add(2) + go nc.readLoop() + go nc.flusher() + + // Notify the reader that we are done with the connect handshake, where + // reads were done synchronously and under the connection lock. + nc.br.doneWithConnect() + + return nil +} + +// Main connect function. Will connect to the nats-server. +func (nc *Conn) connect() (bool, error) { + var err error + var connectionEstablished bool + + // Create actual socket connection + // For first connect we walk all servers in the pool and try + // to connect immediately. + nc.mu.Lock() + defer nc.mu.Unlock() + nc.initc = true + // The pool may change inside the loop iteration due to INFO protocol. + for i := 0; i < len(nc.srvPool); i++ { + nc.current = nc.srvPool[i] + + if err = nc.createConn(); err == nil { + // This was moved out of processConnectInit() because + // that function is now invoked from doReconnect() too. 
+ nc.setup() + + err = nc.processConnectInit() + + if err == nil { + nc.current.didConnect = true + nc.current.reconnects = 0 + nc.current.lastErr = nil + break + } else { + nc.mu.Unlock() + nc.close(DISCONNECTED, false, err) + nc.mu.Lock() + // Do not reset nc.current here since it would prevent + // RetryOnFailedConnect to work should this be the last server + // to try before starting doReconnect(). + } + } else { + // Cancel out default connection refused, will trigger the + // No servers error conditional + if strings.Contains(err.Error(), "connection refused") { + err = nil + } + } + } + + if err == nil && nc.status != CONNECTED { + err = ErrNoServers + } + + if err == nil { + connectionEstablished = true + nc.initc = false + } else if nc.Opts.RetryOnFailedConnect { + nc.setup() + nc.changeConnStatus(RECONNECTING) + nc.bw.switchToPending() + go nc.doReconnect(ErrNoServers) + err = nil + } else { + nc.current = nil + } + + return connectionEstablished, err +} + +// This will check to see if the connection should be +// secure. This can be dictated from either end and should +// only be called after the INIT protocol has been received. +func (nc *Conn) checkForSecure() error { + // Check to see if we need to engage TLS + o := nc.Opts + + // Check for mismatch in setups + if o.Secure && !nc.info.TLSRequired && !nc.info.TLSAvailable { + return ErrSecureConnWanted + } else if nc.info.TLSRequired && !o.Secure { + // Switch to Secure since server needs TLS. + o.Secure = true + } + + if o.Secure { + // If TLS handshake first is true, we have already done + // the handshake, so we are done here. + if o.TLSHandshakeFirst { + return nil + } + // Need to rewrap with bufio + if err := nc.makeTLSConn(); err != nil { + return err + } + } + return nil +} + +// processExpectedInfo will look for the expected first INFO message +// sent when a connection is established. The lock should be held entering. +func (nc *Conn) processExpectedInfo() error { + + c := &control{} + + // Read the protocol + err := nc.readOp(c) + if err != nil { + return err + } + + // The nats protocol should send INFO first always. + if c.op != _INFO_OP_ { + return ErrNoInfoReceived + } + + // Parse the protocol + if err := nc.processInfo(c.args); err != nil { + return err + } + + if nc.Opts.Nkey != "" && nc.info.Nonce == "" { + return ErrNkeysNotSupported + } + + // For websocket connections, we already switched to TLS if need be, + // so we are done here. + if nc.ws { + return nil + } + + return nc.checkForSecure() +} + +// Sends a protocol control message by queuing into the bufio writer +// and kicking the flush Go routine. These writes are protected. +func (nc *Conn) sendProto(proto string) { + nc.mu.Lock() + nc.bw.appendString(proto) + nc.kickFlusher() + nc.mu.Unlock() +} + +// Generate a connect protocol message, issuing user/password if +// applicable. The lock is assumed to be held upon entering. +func (nc *Conn) connectProto() (string, error) { + o := nc.Opts + var nkey, sig, user, pass, token, ujwt string + u := nc.current.url.User + if u != nil { + // if no password, assume username is authToken + if _, ok := u.Password(); !ok { + token = u.Username() + } else { + user = u.Username() + pass, _ = u.Password() + } + } else { + // Take from options (possibly all empty strings) + user = o.User + pass = o.Password + token = o.Token + nkey = o.Nkey + } + + // Look for user jwt. 
+ if o.UserJWT != nil { + if jwt, err := o.UserJWT(); err != nil { + return _EMPTY_, err + } else { + ujwt = jwt + } + if nkey != _EMPTY_ { + return _EMPTY_, ErrNkeyAndUser + } + } + + if ujwt != _EMPTY_ || nkey != _EMPTY_ { + if o.SignatureCB == nil { + if ujwt == _EMPTY_ { + return _EMPTY_, ErrNkeyButNoSigCB + } + return _EMPTY_, ErrUserButNoSigCB + } + sigraw, err := o.SignatureCB([]byte(nc.info.Nonce)) + if err != nil { + return _EMPTY_, fmt.Errorf("error signing nonce: %w", err) + } + sig = base64.RawURLEncoding.EncodeToString(sigraw) + } + + if nc.Opts.TokenHandler != nil { + if token != _EMPTY_ { + return _EMPTY_, ErrTokenAlreadySet + } + token = nc.Opts.TokenHandler() + } + + // If our server does not support headers then we can't do them or no responders. + hdrs := nc.info.Headers + cinfo := connectInfo{o.Verbose, o.Pedantic, ujwt, nkey, sig, user, pass, token, + o.Secure, o.Name, LangString, Version, clientProtoInfo, !o.NoEcho, hdrs, hdrs} + + b, err := json.Marshal(cinfo) + if err != nil { + return _EMPTY_, ErrJsonParse + } + + // Check if NoEcho is set and we have a server that supports it. + if o.NoEcho && nc.info.Proto < 1 { + return _EMPTY_, ErrNoEchoNotSupported + } + + return fmt.Sprintf(connectProto, b), nil +} + +// normalizeErr removes the prefix -ERR, trim spaces and remove the quotes. +func normalizeErr(line string) string { + s := strings.TrimSpace(strings.TrimPrefix(line, _ERR_OP_)) + s = strings.TrimLeft(strings.TrimRight(s, "'"), "'") + return s +} + +// natsProtoErr represents an -ERR protocol message sent by the server. +type natsProtoErr struct { + description string +} + +func (nerr *natsProtoErr) Error() string { + return fmt.Sprintf("nats: %s", nerr.description) +} + +func (nerr *natsProtoErr) Is(err error) bool { + return strings.ToLower(nerr.Error()) == err.Error() +} + +// Send a connect protocol message to the server, issue user/password if +// applicable. Will wait for a flush to return from the server for error +// processing. +func (nc *Conn) sendConnect() error { + // Construct the CONNECT protocol string + cProto, err := nc.connectProto() + if err != nil { + if !nc.initc && nc.Opts.AsyncErrorCB != nil { + nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) + } + return err + } + + // Write the protocol and PING directly to the underlying writer. + if err := nc.bw.writeDirect(cProto, pingProto); err != nil { + return err + } + + // We don't want to read more than we need here, otherwise + // we would need to transfer the excess read data to the readLoop. + // Since in normal situations we just are looking for a PONG\r\n, + // reading byte-by-byte here is ok. + proto, err := nc.readProto() + if err != nil { + if !nc.initc && nc.Opts.AsyncErrorCB != nil { + nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) + } + return err + } + + // If opts.Verbose is set, handle +OK + if nc.Opts.Verbose && proto == okProto { + // Read the rest now... + proto, err = nc.readProto() + if err != nil { + if !nc.initc && nc.Opts.AsyncErrorCB != nil { + nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) + } + return err + } + } + + // We expect a PONG + if proto != pongProto { + // But it could be something else, like -ERR + + // Since we no longer use ReadLine(), trim the trailing "\r\n" + proto = strings.TrimRight(proto, "\r\n") + + // If it's a server error... + if strings.HasPrefix(proto, _ERR_OP_) { + // Remove -ERR, trim spaces and quotes, and convert to lower case. 
+ proto = normalizeErr(proto) + + // Check if this is an auth error + if authErr := checkAuthError(strings.ToLower(proto)); authErr != nil { + // This will schedule an async error if we are in reconnect, + // and keep track of the auth error for the current server. + // If we have got the same error twice, this sets nc.ar to true to + // indicate that the reconnect should be aborted (will be checked + // in doReconnect()). + nc.processAuthError(authErr) + } + return &natsProtoErr{proto} + } + + // Notify that we got an unexpected protocol. + return fmt.Errorf("nats: expected '%s', got '%s'", _PONG_OP_, proto) + } + + // This is where we are truly connected. + nc.changeConnStatus(CONNECTED) + + return nil +} + +// reads a protocol line. +func (nc *Conn) readProto() (string, error) { + return nc.br.ReadString('\n') +} + +// A control protocol line. +type control struct { + op, args string +} + +// Read a control line and process the intended op. +func (nc *Conn) readOp(c *control) error { + line, err := nc.readProto() + if err != nil { + return err + } + parseControl(line, c) + return nil +} + +// Parse a control line from the server. +func parseControl(line string, c *control) { + toks := strings.SplitN(line, _SPC_, 2) + if len(toks) == 1 { + c.op = strings.TrimSpace(toks[0]) + c.args = _EMPTY_ + } else if len(toks) == 2 { + c.op, c.args = strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) + } else { + c.op = _EMPTY_ + } +} + +// flushReconnectPendingItems will push the pending items that were +// gathered while we were in a RECONNECTING state to the socket. +func (nc *Conn) flushReconnectPendingItems() error { + return nc.bw.flushPendingBuffer() +} + +// Stops the ping timer if set. +// Connection lock is held on entry. +func (nc *Conn) stopPingTimer() { + if nc.ptmr != nil { + nc.ptmr.Stop() + } +} + +// Try to reconnect using the option parameters. +// This function assumes we are allowed to reconnect. +func (nc *Conn) doReconnect(err error) { + // We want to make sure we have the other watchers shutdown properly + // here before we proceed past this point. + nc.waitForExits() + + // FIXME(dlc) - We have an issue here if we have + // outstanding flush points (pongs) and they were not + // sent out, but are still in the pipe. + + // Hold the lock manually and release where needed below, + // can't do defer here. + nc.mu.Lock() + + // Clear any errors. + nc.err = nil + // Perform appropriate callback if needed for a disconnect. + // DisconnectedErrCB has priority over deprecated DisconnectedCB + if !nc.initc { + if nc.Opts.DisconnectedErrCB != nil { + nc.ach.push(func() { nc.Opts.DisconnectedErrCB(nc, err) }) + } else if nc.Opts.DisconnectedCB != nil { + nc.ach.push(func() { nc.Opts.DisconnectedCB(nc) }) + } + } + + // This is used to wait on go routines exit if we start them in the loop + // but an error occurs after that. + waitForGoRoutines := false + var rt *time.Timer + // Channel used to kick routine out of sleep when conn is closed. + rqch := nc.rqch + // Counter that is increased when the whole list of servers has been tried. + var wlf int + + var jitter time.Duration + var rw time.Duration + // If a custom reconnect delay handler is set, this takes precedence. + crd := nc.Opts.CustomReconnectDelayCB + if crd == nil { + rw = nc.Opts.ReconnectWait + // TODO: since we sleep only after the whole list has been tried, we can't + // rely on individual *srv to know if it is a TLS or non-TLS url. 
+ // We have to pick which type of jitter to use, for now, we use these hints: + jitter = nc.Opts.ReconnectJitter + if nc.Opts.Secure || nc.Opts.TLSConfig != nil { + jitter = nc.Opts.ReconnectJitterTLS + } + } + + for i := 0; len(nc.srvPool) > 0; { + cur, err := nc.selectNextServer() + if err != nil { + nc.err = err + break + } + + doSleep := i+1 >= len(nc.srvPool) + nc.mu.Unlock() + + if !doSleep { + i++ + // Release the lock to give a chance to a concurrent nc.Close() to break the loop. + runtime.Gosched() + } else { + i = 0 + var st time.Duration + if crd != nil { + wlf++ + st = crd(wlf) + } else { + st = rw + if jitter > 0 { + st += time.Duration(rand.Int63n(int64(jitter))) + } + } + if rt == nil { + rt = time.NewTimer(st) + } else { + rt.Reset(st) + } + select { + case <-rqch: + rt.Stop() + case <-rt.C: + } + } + // If the readLoop, etc.. go routines were started, wait for them to complete. + if waitForGoRoutines { + nc.waitForExits() + waitForGoRoutines = false + } + nc.mu.Lock() + + // Check if we have been closed first. + if nc.isClosed() { + break + } + + // Mark that we tried a reconnect + cur.reconnects++ + + // Try to create a new connection + err = nc.createConn() + + // Not yet connected, retry... + // Continue to hold the lock + if err != nil { + nc.err = nil + continue + } + + // We are reconnected + nc.Reconnects++ + + // Process connect logic + if nc.err = nc.processConnectInit(); nc.err != nil { + // Check if we should abort reconnect. If so, break out + // of the loop and connection will be closed. + if nc.ar { + break + } + nc.changeConnStatus(RECONNECTING) + continue + } + + // Clear possible lastErr under the connection lock after + // a successful processConnectInit(). + nc.current.lastErr = nil + + // Clear out server stats for the server we connected to.. + cur.didConnect = true + cur.reconnects = 0 + + // Send existing subscription state + nc.resendSubscriptions() + + // Now send off and clear pending buffer + nc.err = nc.flushReconnectPendingItems() + if nc.err != nil { + nc.changeConnStatus(RECONNECTING) + // Stop the ping timer (if set) + nc.stopPingTimer() + // Since processConnectInit() returned without error, the + // go routines were started, so wait for them to return + // on the next iteration (after releasing the lock). + waitForGoRoutines = true + continue + } + + // Done with the pending buffer + nc.bw.doneWithPending() + + // This is where we are truly connected. + nc.status = CONNECTED + + // If we are here with a retry on failed connect, indicate that the + // initial connect is now complete. + nc.initc = false + + // Queue up the reconnect callback. + if nc.Opts.ReconnectedCB != nil { + nc.ach.push(func() { nc.Opts.ReconnectedCB(nc) }) + } + + // Release lock here, we will return below. + nc.mu.Unlock() + + // Make sure to flush everything + nc.Flush() + + return + } + + // Call into close.. We have no servers left.. + if nc.err == nil { + nc.err = ErrNoServers + } + nc.mu.Unlock() + nc.close(CLOSED, true, nil) +} + +// processOpErr handles errors from reading or parsing the protocol. +// The lock should not be held entering this function. 
+func (nc *Conn) processOpErr(err error) { + nc.mu.Lock() + if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() { + nc.mu.Unlock() + return + } + + if nc.Opts.AllowReconnect && nc.status == CONNECTED { + // Set our new status + nc.changeConnStatus(RECONNECTING) + // Stop ping timer if set + nc.stopPingTimer() + if nc.conn != nil { + nc.conn.Close() + nc.conn = nil + } + + // Create pending buffer before reconnecting. + nc.bw.switchToPending() + + // Clear any queued pongs, e.g. pending flush calls. + nc.clearPendingFlushCalls() + + go nc.doReconnect(err) + nc.mu.Unlock() + return + } + + nc.changeConnStatus(DISCONNECTED) + nc.err = err + nc.mu.Unlock() + nc.close(CLOSED, true, nil) +} + +// dispatch is responsible for calling any async callbacks +func (ac *asyncCallbacksHandler) asyncCBDispatcher() { + for { + ac.mu.Lock() + // Protect for spurious wakeups. We should get out of the + // wait only if there is an element to pop from the list. + for ac.head == nil { + ac.cond.Wait() + } + cur := ac.head + ac.head = cur.next + if cur == ac.tail { + ac.tail = nil + } + ac.mu.Unlock() + + // This signals that the dispatcher has been closed and all + // previous callbacks have been dispatched. + if cur.f == nil { + return + } + // Invoke callback outside of handler's lock + cur.f() + } +} + +// Add the given function to the tail of the list and +// signals the dispatcher. +func (ac *asyncCallbacksHandler) push(f func()) { + ac.pushOrClose(f, false) +} + +// Signals that we are closing... +func (ac *asyncCallbacksHandler) close() { + ac.pushOrClose(nil, true) +} + +// Add the given function to the tail of the list and +// signals the dispatcher. +func (ac *asyncCallbacksHandler) pushOrClose(f func(), close bool) { + ac.mu.Lock() + defer ac.mu.Unlock() + // Make sure that library is not calling push with nil function, + // since this is used to notify the dispatcher that it should stop. + if !close && f == nil { + panic("pushing a nil callback") + } + cb := &asyncCB{f: f} + if ac.tail != nil { + ac.tail.next = cb + } else { + ac.head = cb + } + ac.tail = cb + if close { + ac.cond.Broadcast() + } else { + ac.cond.Signal() + } +} + +// readLoop() will sit on the socket reading and processing the +// protocol from the server. It will dispatch appropriately based +// on the op type. +func (nc *Conn) readLoop() { + // Release the wait group on exit + defer nc.wg.Done() + + // Create a parseState if needed. + nc.mu.Lock() + if nc.ps == nil { + nc.ps = &parseState{} + } + conn := nc.conn + br := nc.br + nc.mu.Unlock() + + if conn == nil { + return + } + + for { + buf, err := br.Read() + if err == nil { + // With websocket, it is possible that there is no error but + // also no buffer returned (either WS control message or read of a + // partial compressed message). We could call parse(buf) which + // would ignore an empty buffer, but simply go back to top of the loop. + if len(buf) == 0 { + continue + } + err = nc.parse(buf) + } + if err != nil { + nc.processOpErr(err) + break + } + } + // Clear the parseState here.. + nc.mu.Lock() + nc.ps = nil + nc.mu.Unlock() +} + +// waitForMsgs waits on the conditional shared with readLoop and processMsg. +// It is used to deliver messages to asynchronous subscribers. +func (nc *Conn) waitForMsgs(s *Subscription) { + var closed bool + var delivered, max uint64 + + // Used to account for adjustments to sub.pBytes when we wrap back around. 
+ msgLen := -1 + + for { + s.mu.Lock() + // Do accounting for last msg delivered here so we only lock once + // and drain state trips after callback has returned. + if msgLen >= 0 { + s.pMsgs-- + s.pBytes -= msgLen + msgLen = -1 + } + + if s.pHead == nil && !s.closed { + s.pCond.Wait() + } + // Pop the msg off the list + m := s.pHead + if m != nil { + s.pHead = m.next + if s.pHead == nil { + s.pTail = nil + } + if m.barrier != nil { + s.mu.Unlock() + if atomic.AddInt64(&m.barrier.refs, -1) == 0 { + m.barrier.f() + } + continue + } + msgLen = len(m.Data) + } + mcb := s.mcb + max = s.max + closed = s.closed + var fcReply string + if !s.closed { + s.delivered++ + delivered = s.delivered + if s.jsi != nil { + fcReply = s.checkForFlowControlResponse() + } + } + s.mu.Unlock() + + // Respond to flow control if applicable + if fcReply != _EMPTY_ { + nc.Publish(fcReply, nil) + } + + if closed { + break + } + + // Deliver the message. + if m != nil && (max == 0 || delivered <= max) { + mcb(m) + } + // If we have hit the max for delivered msgs, remove sub. + if max > 0 && delivered >= max { + nc.mu.Lock() + nc.removeSub(s) + nc.mu.Unlock() + break + } + } + // Check for barrier messages + s.mu.Lock() + for m := s.pHead; m != nil; m = s.pHead { + if m.barrier != nil { + s.mu.Unlock() + if atomic.AddInt64(&m.barrier.refs, -1) == 0 { + m.barrier.f() + } + s.mu.Lock() + } + s.pHead = m.next + } + // Now check for pDone + done := s.pDone + s.mu.Unlock() + + if done != nil { + done(s.Subject) + } +} + +// Used for debugging and simulating loss for certain tests. +// Return what is to be used. If we return nil the message will be dropped. +type msgFilter func(m *Msg) *Msg + +// processMsg is called by parse and will place the msg on the +// appropriate channel/pending queue for processing. If the channel is full, +// or the pending queue is over the pending limits, the connection is +// considered a slow consumer. +func (nc *Conn) processMsg(data []byte) { + // Stats + atomic.AddUint64(&nc.InMsgs, 1) + atomic.AddUint64(&nc.InBytes, uint64(len(data))) + + // Don't lock the connection to avoid server cutting us off if the + // flusher is holding the connection lock, trying to send to the server + // that is itself trying to send data to us. + nc.subsMu.RLock() + sub := nc.subs[nc.ps.ma.sid] + var mf msgFilter + if nc.filters != nil { + mf = nc.filters[string(nc.ps.ma.subject)] + } + nc.subsMu.RUnlock() + + if sub == nil { + return + } + + // Copy them into string + subj := string(nc.ps.ma.subject) + reply := string(nc.ps.ma.reply) + + // Doing message create outside of the sub's lock to reduce contention. + // It's possible that we end-up not using the message, but that's ok. + + // FIXME(dlc): Need to copy, should/can do COW? + var msgPayload = data + if !nc.ps.msgCopied { + msgPayload = make([]byte, len(data)) + copy(msgPayload, data) + } + + // Check if we have headers encoded here. + var h Header + var err error + var ctrlMsg bool + var ctrlType int + var fcReply string + + if nc.ps.ma.hdr > 0 { + hbuf := msgPayload[:nc.ps.ma.hdr] + msgPayload = msgPayload[nc.ps.ma.hdr:] + h, err = DecodeHeadersMsg(hbuf) + if err != nil { + // We will pass the message through but send async error. + nc.mu.Lock() + nc.err = ErrBadHeaderMsg + if nc.Opts.AsyncErrorCB != nil { + nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrBadHeaderMsg) }) + } + nc.mu.Unlock() + } + } + + // FIXME(dlc): Should we recycle these containers? 
+ m := &Msg{ + Subject: subj, + Reply: reply, + Header: h, + Data: msgPayload, + Sub: sub, + wsz: len(data) + len(subj) + len(reply), + } + + // Check for message filters. + if mf != nil { + if m = mf(m); m == nil { + // Drop message. + return + } + } + + sub.mu.Lock() + + // Check if closed. + if sub.closed { + sub.mu.Unlock() + return + } + + // Skip flow control messages in case of using a JetStream context. + jsi := sub.jsi + if jsi != nil { + // There has to be a header for it to be a control message. + if h != nil { + ctrlMsg, ctrlType = isJSControlMessage(m) + if ctrlMsg && ctrlType == jsCtrlHB { + // Check if the heartbeat has a "Consumer Stalled" header, if + // so, the value is the FC reply to send a nil message to. + // We will send it at the end of this function. + fcReply = m.Header.Get(consumerStalledHdr) + } + } + // Check for ordered consumer here. If checkOrderedMsgs returns true that means it detected a gap. + if !ctrlMsg && jsi.ordered && sub.checkOrderedMsgs(m) { + sub.mu.Unlock() + return + } + } + + // Skip processing if this is a control message and + // if not a pull consumer heartbeat. For pull consumers, + // heartbeats have to be handled on per request basis. + if !ctrlMsg || (jsi != nil && jsi.pull) { + var chanSubCheckFC bool + // Subscription internal stats (applicable only for non ChanSubscription's) + if sub.typ != ChanSubscription { + sub.pMsgs++ + if sub.pMsgs > sub.pMsgsMax { + sub.pMsgsMax = sub.pMsgs + } + sub.pBytes += len(m.Data) + if sub.pBytes > sub.pBytesMax { + sub.pBytesMax = sub.pBytes + } + + // Check for a Slow Consumer + if (sub.pMsgsLimit > 0 && sub.pMsgs > sub.pMsgsLimit) || + (sub.pBytesLimit > 0 && sub.pBytes > sub.pBytesLimit) { + goto slowConsumer + } + } else if jsi != nil { + chanSubCheckFC = true + } + + // We have two modes of delivery. One is the channel, used by channel + // subscribers and syncSubscribers, the other is a linked list for async. + if sub.mch != nil { + select { + case sub.mch <- m: + default: + goto slowConsumer + } + } else { + // Push onto the async pList + if sub.pHead == nil { + sub.pHead = m + sub.pTail = m + if sub.pCond != nil { + sub.pCond.Signal() + } + } else { + sub.pTail.next = m + sub.pTail = m + } + } + if jsi != nil { + // Store the ACK metadata from the message to + // compare later on with the received heartbeat. + sub.trackSequences(m.Reply) + if chanSubCheckFC { + // For ChanSubscription, since we can't call this when a message + // is "delivered" (since user is pull from their own channel), + // we have a go routine that does this check, however, we do it + // also here to make it much more responsive. The go routine is + // really to avoid stalling when there is no new messages coming. + fcReply = sub.checkForFlowControlResponse() + } + } + } else if ctrlType == jsCtrlFC && m.Reply != _EMPTY_ { + // This is a flow control message. + // We will schedule the send of the FC reply once we have delivered the + // DATA message that was received before this flow control message, which + // has sequence `jsi.fciseq`. However, it is possible that this message + // has already been delivered, in that case, we need to send the FC reply now. + if sub.getJSDelivered() >= jsi.fciseq { + fcReply = m.Reply + } else { + // Schedule a reply after the previous message is delivered. + sub.scheduleFlowControlResponse(m.Reply) + } + } + + // Clear any SlowConsumer status. + sub.sc = false + sub.mu.Unlock() + + if fcReply != _EMPTY_ { + nc.Publish(fcReply, nil) + } + + // Handle control heartbeat messages. 
+ if ctrlMsg && ctrlType == jsCtrlHB && m.Reply == _EMPTY_ { + nc.checkForSequenceMismatch(m, sub, jsi) + } + + return + +slowConsumer: + sub.dropped++ + sc := !sub.sc + sub.sc = true + // Undo stats from above + if sub.typ != ChanSubscription { + sub.pMsgs-- + sub.pBytes -= len(m.Data) + } + sub.mu.Unlock() + if sc { + // Now we need connection's lock and we may end-up in the situation + // that we were trying to avoid, except that in this case, the client + // is already experiencing client-side slow consumer situation. + nc.mu.Lock() + nc.err = ErrSlowConsumer + if nc.Opts.AsyncErrorCB != nil { + nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrSlowConsumer) }) + } + nc.mu.Unlock() + } +} + +// processPermissionsViolation is called when the server signals a subject +// permissions violation on either publish or subscribe. +func (nc *Conn) processPermissionsViolation(err string) { + nc.mu.Lock() + // create error here so we can pass it as a closure to the async cb dispatcher. + e := errors.New("nats: " + err) + nc.err = e + if nc.Opts.AsyncErrorCB != nil { + nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, e) }) + } + nc.mu.Unlock() +} + +// processAuthError generally processing for auth errors. We want to do retries +// unless we get the same error again. This allows us for instance to swap credentials +// and have the app reconnect, but if nothing is changing we should bail. +// This function will return true if the connection should be closed, false otherwise. +// Connection lock is held on entry +func (nc *Conn) processAuthError(err error) bool { + nc.err = err + if !nc.initc && nc.Opts.AsyncErrorCB != nil { + nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) + } + // We should give up if we tried twice on this server and got the + // same error. This behavior can be modified using IgnoreAuthErrorAbort. + if nc.current.lastErr == err && !nc.Opts.IgnoreAuthErrorAbort { + nc.ar = true + } else { + nc.current.lastErr = err + } + return nc.ar +} + +// flusher is a separate Go routine that will process flush requests for the write +// bufio. This allows coalescing of writes to the underlying socket. +func (nc *Conn) flusher() { + // Release the wait group + defer nc.wg.Done() + + // snapshot the bw and conn since they can change from underneath of us. + nc.mu.Lock() + bw := nc.bw + conn := nc.conn + fch := nc.fch + nc.mu.Unlock() + + if conn == nil || bw == nil { + return + } + + for { + if _, ok := <-fch; !ok { + return + } + nc.mu.Lock() + + // Check to see if we should bail out. + if !nc.isConnected() || nc.isConnecting() || conn != nc.conn { + nc.mu.Unlock() + return + } + if bw.buffered() > 0 { + if err := bw.flush(); err != nil { + if nc.err == nil { + nc.err = err + } + if nc.Opts.AsyncErrorCB != nil { + nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) + } + } + } + nc.mu.Unlock() + } +} + +// processPing will send an immediate pong protocol response to the +// server. The server uses this mechanism to detect dead clients. +func (nc *Conn) processPing() { + nc.sendProto(pongProto) +} + +// processPong is used to process responses to the client's ping +// messages. We use pings for the flush mechanism as well. +func (nc *Conn) processPong() { + var ch chan struct{} + + nc.mu.Lock() + if len(nc.pongs) > 0 { + ch = nc.pongs[0] + nc.pongs = append(nc.pongs[:0], nc.pongs[1:]...) + } + nc.pout = 0 + nc.mu.Unlock() + if ch != nil { + ch <- struct{}{} + } +} + +// processOK is a placeholder for processing OK messages. 
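+// The server only sends +OK acknowledgements when the connection runs in
+// verbose mode, and the client can safely ignore them.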
+func (nc *Conn) processOK() {
+	// do nothing
+}
+
+// processInfo is used to parse the info messages sent
+// from the server.
+// This function may update the server pool.
+func (nc *Conn) processInfo(info string) error {
+	if info == _EMPTY_ {
+		return nil
+	}
+	var ncInfo serverInfo
+	if err := json.Unmarshal([]byte(info), &ncInfo); err != nil {
+		return err
+	}
+
+	// Copy content into connection's info structure.
+	nc.info = ncInfo
+	// The array could be empty/not present on initial connect,
+	// if advertise is disabled on that server, or servers that
+	// did not include themselves in the async INFO protocol.
+	// If empty, do not remove the implicit servers from the pool.
+	if len(nc.info.ConnectURLs) == 0 {
+		if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil {
+			nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) })
+		}
+		return nil
+	}
+	// Note about pool randomization: when the pool was first created,
+	// it was randomized (if allowed). We keep the order the same (removing
+	// implicit servers that are no longer sent to us). New URLs are sent
+	// to us in no specific order, so they don't need extra randomization.
+	hasNew := false
+	// This is what we got from the server we are connected to.
+	urls := nc.info.ConnectURLs
+	// Transform that to a map for easy lookups
+	tmp := make(map[string]struct{}, len(urls))
+	for _, curl := range urls {
+		tmp[curl] = struct{}{}
+	}
+	// Walk the pool and remove the implicit servers that are no longer in the
+	// given array/map.
+	sp := nc.srvPool
+	for i := 0; i < len(sp); i++ {
+		srv := sp[i]
+		curl := srv.url.Host
+		// Check if this URL is in the INFO protocol
+		_, inInfo := tmp[curl]
+		// Remove from the temp map so that at the end we are left with only
+		// new (or restarted) servers that need to be added to the pool.
+		delete(tmp, curl)
+		// Keep servers that were set through Options, but also the one that
+		// we are currently connected to (even if it is a discovered server).
+		if !srv.isImplicit || srv.url == nc.current.url {
+			continue
+		}
+		if !inInfo {
+			// Remove from server pool. Keep current order.
+			copy(sp[i:], sp[i+1:])
+			nc.srvPool = sp[:len(sp)-1]
+			sp = nc.srvPool
+			i--
+		}
+	}
+	// Figure out if we should save off the current non-IP hostname if we encounter a bare IP.
+	saveTLS := nc.current != nil && !hostIsIP(nc.current.url)
+
+	// If there are any left in the tmp map, these are new (or restarted) servers
+	// and need to be added to the pool.
+	for curl := range tmp {
+		// Before adding, check if this is a new (as in never seen) URL.
+		// This is used to figure out if we invoke the DiscoveredServersCB
+		if _, present := nc.urls[curl]; !present {
+			hasNew = true
+		}
+		nc.addURLToPool(fmt.Sprintf("%s://%s", nc.connScheme(), curl), true, saveTLS)
+	}
+	if hasNew {
+		// Randomize the pool if allowed but leave the first URL in place.
+		if !nc.Opts.NoRandomize {
+			nc.shufflePool(1)
+		}
+		if !nc.initc && nc.Opts.DiscoveredServersCB != nil {
+			nc.ach.push(func() { nc.Opts.DiscoveredServersCB(nc) })
+		}
+	}
+	if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil {
+		nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) })
+	}
+	return nil
+}
+
+// processAsyncInfo does the same as processInfo, but is called
+// from the parser. Calls processInfo under the connection's lock
+// protection.
+func (nc *Conn) processAsyncInfo(info []byte) {
+	nc.mu.Lock()
+	// Ignore errors, we will simply not update the server pool...
+ nc.processInfo(string(info)) + nc.mu.Unlock() +} + +// LastError reports the last error encountered via the connection. +// It can be used reliably within ClosedCB in order to find out reason +// why connection was closed for example. +func (nc *Conn) LastError() error { + if nc == nil { + return ErrInvalidConnection + } + nc.mu.RLock() + err := nc.err + nc.mu.RUnlock() + return err +} + +// Check if the given error string is an auth error, and if so returns +// the corresponding ErrXXX error, nil otherwise +func checkAuthError(e string) error { + if strings.HasPrefix(e, AUTHORIZATION_ERR) { + return ErrAuthorization + } + if strings.HasPrefix(e, AUTHENTICATION_EXPIRED_ERR) { + return ErrAuthExpired + } + if strings.HasPrefix(e, AUTHENTICATION_REVOKED_ERR) { + return ErrAuthRevoked + } + if strings.HasPrefix(e, ACCOUNT_AUTHENTICATION_EXPIRED_ERR) { + return ErrAccountAuthExpired + } + return nil +} + +// processErr processes any error messages from the server and +// sets the connection's LastError. +func (nc *Conn) processErr(ie string) { + // Trim, remove quotes + ne := normalizeErr(ie) + // convert to lower case. + e := strings.ToLower(ne) + + close := false + + // FIXME(dlc) - process Slow Consumer signals special. + if e == STALE_CONNECTION { + nc.processOpErr(ErrStaleConnection) + } else if e == MAX_CONNECTIONS_ERR { + nc.processOpErr(ErrMaxConnectionsExceeded) + } else if strings.HasPrefix(e, PERMISSIONS_ERR) { + nc.processPermissionsViolation(ne) + } else if authErr := checkAuthError(e); authErr != nil { + nc.mu.Lock() + close = nc.processAuthError(authErr) + nc.mu.Unlock() + } else { + close = true + nc.mu.Lock() + nc.err = errors.New("nats: " + ne) + nc.mu.Unlock() + } + if close { + nc.close(CLOSED, true, nil) + } +} + +// kickFlusher will send a bool on a channel to kick the +// flush Go routine to flush data to the server. +func (nc *Conn) kickFlusher() { + if nc.bw != nil { + select { + case nc.fch <- struct{}{}: + default: + } + } +} + +// Publish publishes the data argument to the given subject. The data +// argument is left untouched and needs to be correctly interpreted on +// the receiver. +func (nc *Conn) Publish(subj string, data []byte) error { + return nc.publish(subj, _EMPTY_, nil, data) +} + +// Header represents the optional Header for a NATS message, +// based on the implementation of http.Header. +type Header map[string][]string + +// Add adds the key, value pair to the header. It is case-sensitive +// and appends to any existing values associated with key. +func (h Header) Add(key, value string) { + h[key] = append(h[key], value) +} + +// Set sets the header entries associated with key to the single +// element value. It is case-sensitive and replaces any existing +// values associated with key. +func (h Header) Set(key, value string) { + h[key] = []string{value} +} + +// Get gets the first value associated with the given key. +// It is case-sensitive. +func (h Header) Get(key string) string { + if h == nil { + return _EMPTY_ + } + if v := h[key]; v != nil { + return v[0] + } + return _EMPTY_ +} + +// Values returns all values associated with the given key. +// It is case-sensitive. +func (h Header) Values(key string) []string { + return h[key] +} + +// Del deletes the values associated with a key. +// It is case-sensitive. +func (h Header) Del(key string) { + delete(h, key) +} + +// NewMsg creates a message for publishing that will use headers. 
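+//
+// Illustrative usage (an editor's sketch; it assumes a connected *nats.Conn
+// nc and a server that supports headers):
+//
+//	m := nats.NewMsg("updates.events")
+//	m.Header.Set("Trace-Id", "abc123")
+//	m.Data = []byte("hello")
+//	if err := nc.PublishMsg(m); err != nil {
+//		// handle the publish error
+//	}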
+func NewMsg(subject string) *Msg {
+	return &Msg{
+		Subject: subject,
+		Header:  make(Header),
+	}
+}
+
+const (
+	hdrLine            = "NATS/1.0\r\n"
+	crlf               = "\r\n"
+	hdrPreEnd          = len(hdrLine) - len(crlf)
+	statusHdr          = "Status"
+	descrHdr           = "Description"
+	lastConsumerSeqHdr = "Nats-Last-Consumer"
+	lastStreamSeqHdr   = "Nats-Last-Stream"
+	consumerStalledHdr = "Nats-Consumer-Stalled"
+	noResponders       = "503"
+	noMessagesSts      = "404"
+	reqTimeoutSts      = "408"
+	jetStream409Sts    = "409"
+	controlMsg         = "100"
+	statusLen          = 3 // e.g. 20x, 40x, 50x
+)
+
+// DecodeHeadersMsg will decode the raw header bytes of a message
+// into a Header map.
+func DecodeHeadersMsg(data []byte) (Header, error) {
+	br := bufio.NewReaderSize(bytes.NewReader(data), 128)
+	tp := textproto.NewReader(br)
+	l, err := tp.ReadLine()
+	if err != nil || len(l) < hdrPreEnd || l[:hdrPreEnd] != hdrLine[:hdrPreEnd] {
+		return nil, ErrBadHeaderMsg
+	}
+
+	mh, err := readMIMEHeader(tp)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check if we have an inlined status.
+	if len(l) > hdrPreEnd {
+		var description string
+		status := strings.TrimSpace(l[hdrPreEnd:])
+		if len(status) != statusLen {
+			description = strings.TrimSpace(status[statusLen:])
+			status = status[:statusLen]
+		}
+		mh.Add(statusHdr, status)
+		if len(description) > 0 {
+			mh.Add(descrHdr, description)
+		}
+	}
+	return Header(mh), nil
+}
+
+// readMIMEHeader returns a MIMEHeader that preserves the
+// original case of the MIME header, based on the implementation
+// of textproto.ReadMIMEHeader.
+//
+// https://golang.org/pkg/net/textproto/#Reader.ReadMIMEHeader
+func readMIMEHeader(tp *textproto.Reader) (textproto.MIMEHeader, error) {
+	m := make(textproto.MIMEHeader)
+	for {
+		kv, err := tp.ReadLine()
+		if len(kv) == 0 {
+			return m, err
+		}
+
+		// Process key fetching original case.
+		i := bytes.IndexByte([]byte(kv), ':')
+		if i < 0 {
+			return nil, ErrBadHeaderMsg
+		}
+		key := kv[:i]
+		if key == "" {
+			// Skip empty keys.
+			continue
+		}
+		i++
+		for i < len(kv) && (kv[i] == ' ' || kv[i] == '\t') {
+			i++
+		}
+		value := string(kv[i:])
+		m[key] = append(m[key], value)
+		if err != nil {
+			return m, err
+		}
+	}
+}
+
+// PublishMsg publishes the Msg structure, which includes the
+// Subject, an optional Reply and an optional Data field.
+func (nc *Conn) PublishMsg(m *Msg) error {
+	if m == nil {
+		return ErrInvalidMsg
+	}
+	hdr, err := m.headerBytes()
+	if err != nil {
+		return err
+	}
+	return nc.publish(m.Subject, m.Reply, hdr, m.Data)
+}
+
+// PublishRequest will perform a Publish() expecting a response on the
+// reply subject. Use Request() for automatically waiting for a response
+// inline.
+func (nc *Conn) PublishRequest(subj, reply string, data []byte) error {
+	return nc.publish(subj, reply, nil, data)
+}
+
+// Used for hand-rolled Itoa
+const digits = "0123456789"
+
+// publish is the internal function to publish messages to a nats-server.
+// Sends a protocol data message by queuing into the bufio writer
+// and kicking the flush go routine. These writes should be protected.
+func (nc *Conn) publish(subj, reply string, hdr, data []byte) error {
+	if nc == nil {
+		return ErrInvalidConnection
+	}
+	if subj == "" {
+		return ErrBadSubject
+	}
+	nc.mu.Lock()
+
+	// Check if headers were attempted to be sent to a server that does not support them.
+ if len(hdr) > 0 && !nc.info.Headers { + nc.mu.Unlock() + return ErrHeadersNotSupported + } + + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + + if nc.isDrainingPubs() { + nc.mu.Unlock() + return ErrConnectionDraining + } + + // Proactively reject payloads over the threshold set by server. + msgSize := int64(len(data) + len(hdr)) + // Skip this check if we are not yet connected (RetryOnFailedConnect) + if !nc.initc && msgSize > nc.info.MaxPayload { + nc.mu.Unlock() + return ErrMaxPayload + } + + // Check if we are reconnecting, and if so check if + // we have exceeded our reconnect outbound buffer limits. + if nc.bw.atLimitIfUsingPending() { + nc.mu.Unlock() + return ErrReconnectBufExceeded + } + + var mh []byte + if hdr != nil { + mh = nc.scratch[:len(_HPUB_P_)] + } else { + mh = nc.scratch[1:len(_HPUB_P_)] + } + mh = append(mh, subj...) + mh = append(mh, ' ') + if reply != "" { + mh = append(mh, reply...) + mh = append(mh, ' ') + } + + // We could be smarter here, but simple loop is ok, + // just avoid strconv in fast path. + // FIXME(dlc) - Find a better way here. + // msgh = strconv.AppendInt(msgh, int64(len(data)), 10) + // go 1.14 some values strconv faster, may be able to switch over. + + var b [12]byte + var i = len(b) + + if hdr != nil { + if len(hdr) > 0 { + for l := len(hdr); l > 0; l /= 10 { + i-- + b[i] = digits[l%10] + } + } else { + i-- + b[i] = digits[0] + } + mh = append(mh, b[i:]...) + mh = append(mh, ' ') + // reset for below. + i = len(b) + } + + if msgSize > 0 { + for l := msgSize; l > 0; l /= 10 { + i-- + b[i] = digits[l%10] + } + } else { + i-- + b[i] = digits[0] + } + + mh = append(mh, b[i:]...) + mh = append(mh, _CRLF_...) + + if err := nc.bw.appendBufs(mh, hdr, data, _CRLF_BYTES_); err != nil { + nc.mu.Unlock() + return err + } + + nc.OutMsgs++ + nc.OutBytes += uint64(len(data) + len(hdr)) + + if len(nc.fch) == 0 { + nc.kickFlusher() + } + nc.mu.Unlock() + return nil +} + +// respHandler is the global response handler. It will look up +// the appropriate channel based on the last token and place +// the message on the channel if possible. +func (nc *Conn) respHandler(m *Msg) { + nc.mu.Lock() + + // Just return if closed. + if nc.isClosed() { + nc.mu.Unlock() + return + } + + var mch chan *Msg + + // Grab mch + rt := nc.respToken(m.Subject) + if rt != _EMPTY_ { + mch = nc.respMap[rt] + // Delete the key regardless, one response only. + delete(nc.respMap, rt) + } else if len(nc.respMap) == 1 { + // If the server has rewritten the subject, the response token (rt) + // will not match (could be the case with JetStream). If that is the + // case and there is a single entry, use that. + for k, v := range nc.respMap { + mch = v + delete(nc.respMap, k) + break + } + } + nc.mu.Unlock() + + // Don't block, let Request timeout instead, mch is + // buffered and we should delete the key before a + // second response is processed. + select { + case mch <- m: + default: + return + } +} + +// Helper to setup and send new request style requests. Return the chan to receive the response. +func (nc *Conn) createNewRequestAndSend(subj string, hdr, data []byte) (chan *Msg, string, error) { + nc.mu.Lock() + // Do setup for the new style if needed. + if nc.respMap == nil { + nc.initNewResp() + } + // Create new literal Inbox and map to a chan msg. 
+ mch := make(chan *Msg, RequestChanLen) + respInbox := nc.newRespInbox() + token := respInbox[nc.respSubLen:] + + nc.respMap[token] = mch + if nc.respMux == nil { + // Create the response subscription we will use for all new style responses. + // This will be on an _INBOX with an additional terminal token. The subscription + // will be on a wildcard. + s, err := nc.subscribeLocked(nc.respSub, _EMPTY_, nc.respHandler, nil, false, nil) + if err != nil { + nc.mu.Unlock() + return nil, token, err + } + nc.respScanf = strings.Replace(nc.respSub, "*", "%s", -1) + nc.respMux = s + } + nc.mu.Unlock() + + if err := nc.publish(subj, respInbox, hdr, data); err != nil { + return nil, token, err + } + + return mch, token, nil +} + +// RequestMsg will send a request payload including optional headers and deliver +// the response message, or an error, including a timeout if no message was received properly. +func (nc *Conn) RequestMsg(msg *Msg, timeout time.Duration) (*Msg, error) { + if msg == nil { + return nil, ErrInvalidMsg + } + hdr, err := msg.headerBytes() + if err != nil { + return nil, err + } + + return nc.request(msg.Subject, hdr, msg.Data, timeout) +} + +// Request will send a request payload and deliver the response message, +// or an error, including a timeout if no message was received properly. +func (nc *Conn) Request(subj string, data []byte, timeout time.Duration) (*Msg, error) { + return nc.request(subj, nil, data, timeout) +} + +func (nc *Conn) useOldRequestStyle() bool { + nc.mu.RLock() + r := nc.Opts.UseOldRequestStyle + nc.mu.RUnlock() + return r +} + +func (nc *Conn) request(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { + if nc == nil { + return nil, ErrInvalidConnection + } + + var m *Msg + var err error + + if nc.useOldRequestStyle() { + m, err = nc.oldRequest(subj, hdr, data, timeout) + } else { + m, err = nc.newRequest(subj, hdr, data, timeout) + } + + // Check for no responder status. + if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { + m, err = nil, ErrNoResponders + } + return m, err +} + +func (nc *Conn) newRequest(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { + mch, token, err := nc.createNewRequestAndSend(subj, hdr, data) + if err != nil { + return nil, err + } + + t := globalTimerPool.Get(timeout) + defer globalTimerPool.Put(t) + + var ok bool + var msg *Msg + + select { + case msg, ok = <-mch: + if !ok { + return nil, ErrConnectionClosed + } + case <-t.C: + nc.mu.Lock() + delete(nc.respMap, token) + nc.mu.Unlock() + return nil, ErrTimeout + } + + return msg, nil +} + +// oldRequest will create an Inbox and perform a Request() call +// with the Inbox reply and return the first reply received. +// This is optimized for the case of multiple responses. +func (nc *Conn) oldRequest(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { + inbox := nc.NewInbox() + ch := make(chan *Msg, RequestChanLen) + + s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil) + if err != nil { + return nil, err + } + s.AutoUnsubscribe(1) + defer s.Unsubscribe() + + err = nc.publish(subj, inbox, hdr, data) + if err != nil { + return nil, err + } + + return s.NextMsg(timeout) +} + +// InboxPrefix is the prefix for all inbox subjects. +const ( + InboxPrefix = "_INBOX." 
+	inboxPrefixLen = len(InboxPrefix)
+	replySuffixLen = 8 // Gives us 62^8
+	rdigits        = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+	base           = 62
+)
+
+// NewInbox will return an inbox string which can be used for directed replies from
+// subscribers. These are guaranteed to be unique, but can be shared and subscribed
+// to by others.
+func NewInbox() string {
+	var b [inboxPrefixLen + nuidSize]byte
+	pres := b[:inboxPrefixLen]
+	copy(pres, InboxPrefix)
+	ns := b[inboxPrefixLen:]
+	copy(ns, nuid.Next())
+	return string(b[:])
+}
+
+// Create a new inbox that is prefix aware.
+func (nc *Conn) NewInbox() string {
+	if nc.Opts.InboxPrefix == _EMPTY_ {
+		return NewInbox()
+	}
+
+	var sb strings.Builder
+	sb.WriteString(nc.Opts.InboxPrefix)
+	sb.WriteByte('.')
+	sb.WriteString(nuid.Next())
+	return sb.String()
+}
+
+// Function to init new response structures.
+func (nc *Conn) initNewResp() {
+	nc.respSubPrefix = fmt.Sprintf("%s.", nc.NewInbox())
+	nc.respSubLen = len(nc.respSubPrefix)
+	nc.respSub = fmt.Sprintf("%s*", nc.respSubPrefix)
+	nc.respMap = make(map[string]chan *Msg)
+	nc.respRand = rand.New(rand.NewSource(time.Now().UnixNano()))
+}
+
+// newRespInbox creates a new literal response subject
+// that will trigger the mux subscription handler.
+// Lock should be held.
+func (nc *Conn) newRespInbox() string {
+	if nc.respMap == nil {
+		nc.initNewResp()
+	}
+
+	var sb strings.Builder
+	sb.WriteString(nc.respSubPrefix)
+
+	rn := nc.respRand.Int63()
+	for i := 0; i < replySuffixLen; i++ {
+		sb.WriteByte(rdigits[rn%base])
+		rn /= base
+	}
+
+	return sb.String()
+}
+
+// NewRespInbox is the new format used for _INBOX.
+func (nc *Conn) NewRespInbox() string {
+	nc.mu.Lock()
+	s := nc.newRespInbox()
+	nc.mu.Unlock()
+	return s
+}
+
+// respToken will return the last token of a literal response inbox
+// which we use for the message channel lookup. This needs to do a
+// scan to protect itself against the server changing the subject.
+// Lock should be held.
+func (nc *Conn) respToken(respInbox string) string {
+	var token string
+	n, err := fmt.Sscanf(respInbox, nc.respScanf, &token)
+	if err != nil || n != 1 {
+		return ""
+	}
+	return token
+}
+
+// Subscribe will express interest in the given subject. The subject
+// can have wildcards.
+// There are two types of wildcards: * for partial, and > for full.
+// A subscription on subject time.*.east would receive messages sent to time.us.east and time.eu.east.
+// A subscription on subject time.us.> would receive messages sent to
+// time.us.east and time.us.east.atlanta, while time.us.* would only match time.us.east
+// since it can't match more than one token.
+// Messages will be delivered to the associated MsgHandler.
+func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) {
+	return nc.subscribe(subj, _EMPTY_, cb, nil, false, nil)
+}
+
+// ChanSubscribe will express interest in the given subject and place
+// all messages received on the channel.
+// You should not close the channel until sub.Unsubscribe() has been called.
+func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) {
+	return nc.subscribe(subj, _EMPTY_, nil, ch, false, nil)
+}
+
+// ChanQueueSubscribe will express interest in the given subject.
+// All subscribers with the same queue name will form the queue group
+// and only one member of the group will be selected to receive any given message,
+// which will be placed on the channel.
+// You should not close the channel until sub.Unsubscribe() has been called.
+// Note: This is the same as QueueSubscribeSyncWithChan.
+func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) {
+	return nc.subscribe(subj, group, nil, ch, false, nil)
+}
+
+// SubscribeSync will express interest in the given subject. Messages will
+// be received synchronously using Subscription.NextMsg().
+func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) {
+	if nc == nil {
+		return nil, ErrInvalidConnection
+	}
+	mch := make(chan *Msg, nc.Opts.SubChanLen)
+	return nc.subscribe(subj, _EMPTY_, nil, mch, true, nil)
+}
+
+// QueueSubscribe creates an asynchronous queue subscriber on the given subject.
+// All subscribers with the same queue name will form the queue group and
+// only one member of the group will be selected to receive any given
+// message asynchronously.
+func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) {
+	return nc.subscribe(subj, queue, cb, nil, false, nil)
+}
+
+// QueueSubscribeSync creates a synchronous queue subscriber on the given
+// subject. All subscribers with the same queue name will form the queue
+// group and only one member of the group will be selected to receive any
+// given message synchronously using Subscription.NextMsg().
+func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) {
+	mch := make(chan *Msg, nc.Opts.SubChanLen)
+	return nc.subscribe(subj, queue, nil, mch, true, nil)
+}
+
+// QueueSubscribeSyncWithChan will express interest in the given subject.
+// All subscribers with the same queue name will form the queue group
+// and only one member of the group will be selected to receive any given message,
+// which will be placed on the channel.
+// You should not close the channel until sub.Unsubscribe() has been called.
+// Note: This is the same as ChanQueueSubscribe.
+func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) {
+	return nc.subscribe(subj, queue, nil, ch, false, nil)
+}
+
+// badSubject will do a quick test on whether a subject is acceptable.
+// Spaces are not allowed and all tokens should be > 0 in len.
+func badSubject(subj string) bool {
+	if strings.ContainsAny(subj, " \t\r\n") {
+		return true
+	}
+	tokens := strings.Split(subj, ".")
+	for _, t := range tokens {
+		if len(t) == 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// badQueue will check a queue name for whitespace.
+func badQueue(qname string) bool {
+	return strings.ContainsAny(qname, " \t\r\n")
+}
+
+// subscribe is the internal subscribe function that indicates interest in a subject.
+func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool, js *jsSub) (*Subscription, error) {
+	if nc == nil {
+		return nil, ErrInvalidConnection
+	}
+	nc.mu.Lock()
+	defer nc.mu.Unlock()
+	return nc.subscribeLocked(subj, queue, cb, ch, isSync, js)
+}
+
+func (nc *Conn) subscribeLocked(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool, js *jsSub) (*Subscription, error) {
+	if nc == nil {
+		return nil, ErrInvalidConnection
+	}
+	if badSubject(subj) {
+		return nil, ErrBadSubject
+	}
+	if queue != _EMPTY_ && badQueue(queue) {
+		return nil, ErrBadQueueName
+	}
+
+	// Check for some error conditions.
+ if nc.isClosed() { + return nil, ErrConnectionClosed + } + if nc.isDraining() { + return nil, ErrConnectionDraining + } + + if cb == nil && ch == nil { + return nil, ErrBadSubscription + } + + sub := &Subscription{ + Subject: subj, + Queue: queue, + mcb: cb, + conn: nc, + jsi: js, + } + // Set pending limits. + if ch != nil { + sub.pMsgsLimit = cap(ch) + } else { + sub.pMsgsLimit = DefaultSubPendingMsgsLimit + } + sub.pBytesLimit = DefaultSubPendingBytesLimit + + // If we have an async callback, start up a sub specific + // Go routine to deliver the messages. + var sr bool + if cb != nil { + sub.typ = AsyncSubscription + sub.pCond = sync.NewCond(&sub.mu) + sr = true + } else if !isSync { + sub.typ = ChanSubscription + sub.mch = ch + } else { // Sync Subscription + sub.typ = SyncSubscription + sub.mch = ch + } + + nc.subsMu.Lock() + nc.ssid++ + sub.sid = nc.ssid + nc.subs[sub.sid] = sub + nc.subsMu.Unlock() + + // Let's start the go routine now that it is fully setup and registered. + if sr { + go nc.waitForMsgs(sub) + } + + // We will send these for all subs when we reconnect + // so that we can suppress here if reconnecting. + if !nc.isReconnecting() { + nc.bw.appendString(fmt.Sprintf(subProto, subj, queue, sub.sid)) + nc.kickFlusher() + } + + return sub, nil +} + +// NumSubscriptions returns active number of subscriptions. +func (nc *Conn) NumSubscriptions() int { + nc.mu.RLock() + defer nc.mu.RUnlock() + return len(nc.subs) +} + +// Lock for nc should be held here upon entry +func (nc *Conn) removeSub(s *Subscription) { + nc.subsMu.Lock() + delete(nc.subs, s.sid) + nc.subsMu.Unlock() + s.mu.Lock() + defer s.mu.Unlock() + // Release callers on NextMsg for SyncSubscription only + if s.mch != nil && s.typ == SyncSubscription { + close(s.mch) + } + s.mch = nil + + // If JS subscription then stop HB timer. + if jsi := s.jsi; jsi != nil { + if jsi.hbc != nil { + jsi.hbc.Stop() + jsi.hbc = nil + } + if jsi.csfct != nil { + jsi.csfct.Stop() + jsi.csfct = nil + } + } + + // Mark as invalid + s.closed = true + if s.pCond != nil { + s.pCond.Broadcast() + } +} + +// SubscriptionType is the type of the Subscription. +type SubscriptionType int + +// The different types of subscription types. +const ( + AsyncSubscription = SubscriptionType(iota) + SyncSubscription + ChanSubscription + NilSubscription + PullSubscription +) + +// Type returns the type of Subscription. +func (s *Subscription) Type() SubscriptionType { + if s == nil { + return NilSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + // Pull subscriptions are really a SyncSubscription and we want this + // type to be set internally for all delivered messages management, etc.. + // So check when to return PullSubscription to the user. + if s.jsi != nil && s.jsi.pull { + return PullSubscription + } + return s.typ +} + +// IsValid returns a boolean indicating whether the subscription +// is still active. This will return false if the subscription has +// already been closed. +func (s *Subscription) IsValid() bool { + if s == nil { + return false + } + s.mu.Lock() + defer s.mu.Unlock() + return s.conn != nil && !s.closed +} + +// Drain will remove interest but continue callbacks until all messages +// have been processed. +// +// For a JetStream subscription, if the library has created the JetStream +// consumer, the library will send a DeleteConsumer request to the server +// when the Drain operation completes. If a failure occurs when deleting +// the JetStream consumer, an error will be reported to the asynchronous +// error callback. 
+// If you do not wish the JetStream consumer to be automatically deleted, +// ensure that the consumer is not created by the library, which means +// create the consumer with AddConsumer and bind to this consumer. +func (s *Subscription) Drain() error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + conn := s.conn + s.mu.Unlock() + if conn == nil { + return ErrBadSubscription + } + return conn.unsubscribe(s, 0, true) +} + +// Unsubscribe will remove interest in the given subject. +// +// For a JetStream subscription, if the library has created the JetStream +// consumer, it will send a DeleteConsumer request to the server (if the +// unsubscribe itself was successful). If the delete operation fails, the +// error will be returned. +// If you do not wish the JetStream consumer to be automatically deleted, +// ensure that the consumer is not created by the library, which means +// create the consumer with AddConsumer and bind to this consumer (using +// the nats.Bind() option). +func (s *Subscription) Unsubscribe() error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + conn := s.conn + closed := s.closed + dc := s.jsi != nil && s.jsi.dc + s.mu.Unlock() + if conn == nil || conn.IsClosed() { + return ErrConnectionClosed + } + if closed { + return ErrBadSubscription + } + if conn.IsDraining() { + return ErrConnectionDraining + } + err := conn.unsubscribe(s, 0, false) + if err == nil && dc { + err = s.deleteConsumer() + } + return err +} + +// checkDrained will watch for a subscription to be fully drained +// and then remove it. +func (nc *Conn) checkDrained(sub *Subscription) { + if nc == nil || sub == nil { + return + } + + // This allows us to know that whatever we have in the client pending + // is correct and the server will not send additional information. + nc.Flush() + + sub.mu.Lock() + // For JS subscriptions, check if we are going to delete the + // JS consumer when drain completes. + dc := sub.jsi != nil && sub.jsi.dc + sub.mu.Unlock() + + // Once we are here we just wait for Pending to reach 0 or + // any other state to exit this go routine. + for { + // check connection is still valid. + if nc.IsClosed() { + return + } + + // Check subscription state + sub.mu.Lock() + conn := sub.conn + closed := sub.closed + pMsgs := sub.pMsgs + sub.mu.Unlock() + + if conn == nil || closed || pMsgs == 0 { + nc.mu.Lock() + nc.removeSub(sub) + nc.mu.Unlock() + if dc { + if err := sub.deleteConsumer(); err != nil { + nc.mu.Lock() + if errCB := nc.Opts.AsyncErrorCB; errCB != nil { + nc.ach.push(func() { errCB(nc, sub, err) }) + } + nc.mu.Unlock() + } + } + return + } + + time.Sleep(100 * time.Millisecond) + } +} + +// AutoUnsubscribe will issue an automatic Unsubscribe that is +// processed by the server when max messages have been received. +// This can be useful when sending a request to an unknown number +// of subscribers. +func (s *Subscription) AutoUnsubscribe(max int) error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + conn := s.conn + closed := s.closed + s.mu.Unlock() + if conn == nil || closed { + return ErrBadSubscription + } + return conn.unsubscribe(s, max, false) +} + +// SetClosedHandler will set the closed handler for when a subscription +// is closed (either unsubscribed or drained). +func (s *Subscription) SetClosedHandler(handler func(subject string)) { + s.mu.Lock() + s.pDone = handler + s.mu.Unlock() +} + +// unsubscribe performs the low level unsubscribe to the server. 
+// Use Subscription.Unsubscribe() +func (nc *Conn) unsubscribe(sub *Subscription, max int, drainMode bool) error { + var maxStr string + if max > 0 { + sub.mu.Lock() + sub.max = uint64(max) + if sub.delivered < sub.max { + maxStr = strconv.Itoa(max) + } + sub.mu.Unlock() + } + + nc.mu.Lock() + // ok here, but defer is expensive + defer nc.mu.Unlock() + + if nc.isClosed() { + return ErrConnectionClosed + } + + nc.subsMu.RLock() + s := nc.subs[sub.sid] + nc.subsMu.RUnlock() + // Already unsubscribed + if s == nil { + return nil + } + + if maxStr == _EMPTY_ && !drainMode { + nc.removeSub(s) + } + + if drainMode { + go nc.checkDrained(sub) + } + + // We will send these for all subs when we reconnect + // so that we can suppress here. + if !nc.isReconnecting() { + nc.bw.appendString(fmt.Sprintf(unsubProto, s.sid, maxStr)) + nc.kickFlusher() + } + + // For JetStream subscriptions cancel the attached context if there is any. + var cancel func() + sub.mu.Lock() + jsi := sub.jsi + if jsi != nil { + cancel = jsi.cancel + jsi.cancel = nil + } + sub.mu.Unlock() + if cancel != nil { + cancel() + } + + return nil +} + +// NextMsg will return the next message available to a synchronous subscriber +// or block until one is available. An error is returned if the subscription is invalid (ErrBadSubscription), +// the connection is closed (ErrConnectionClosed), the timeout is reached (ErrTimeout), +// or if there were no responders (ErrNoResponders) when used in the context of a request/reply. +func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) { + if s == nil { + return nil, ErrBadSubscription + } + + s.mu.Lock() + err := s.validateNextMsgState(false) + if err != nil { + s.mu.Unlock() + return nil, err + } + + // snapshot + mch := s.mch + s.mu.Unlock() + + var ok bool + var msg *Msg + + // If something is available right away, let's optimize that case. + select { + case msg, ok = <-mch: + if !ok { + return nil, s.getNextMsgErr() + } + if err := s.processNextMsgDelivered(msg); err != nil { + return nil, err + } else { + return msg, nil + } + default: + } + + // If we are here a message was not immediately available, so lets loop + // with a timeout. + + t := globalTimerPool.Get(timeout) + defer globalTimerPool.Put(t) + + select { + case msg, ok = <-mch: + if !ok { + return nil, s.getNextMsgErr() + } + if err := s.processNextMsgDelivered(msg); err != nil { + return nil, err + } + case <-t.C: + return nil, ErrTimeout + } + + return msg, nil +} + +// validateNextMsgState checks whether the subscription is in a valid +// state to call NextMsg and be delivered another message synchronously. +// This should be called while holding the lock. +func (s *Subscription) validateNextMsgState(pullSubInternal bool) error { + if s.connClosed { + return ErrConnectionClosed + } + if s.mch == nil { + if s.max > 0 && s.delivered >= s.max { + return ErrMaxMessages + } else if s.closed { + return ErrBadSubscription + } + } + if s.mcb != nil { + return ErrSyncSubRequired + } + if s.sc { + s.sc = false + return ErrSlowConsumer + } + // Unless this is from an internal call, reject use of this API. + // Users should use Fetch() instead. + if !pullSubInternal && s.jsi != nil && s.jsi.pull { + return ErrTypeSubscription + } + return nil +} + +// This is called when the sync channel has been closed. +// The error returned will be either connection or subscription +// closed depending on what caused NextMsg() to fail. 
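+//
+// For illustration, an editorial sketch of the synchronous consumption loop
+// that can run into these errors (subject name is hypothetical; assumes an
+// established connection nc):
+//
+//	sub, err := nc.SubscribeSync("updates")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for {
+//		msg, err := sub.NextMsg(2 * time.Second)
+//		if err == nats.ErrTimeout {
+//			continue // nothing arrived within the window, keep waiting
+//		}
+//		if err != nil {
+//			break // e.g. ErrConnectionClosed or ErrBadSubscription
+//		}
+//		fmt.Printf("received %q\n", msg.Data)
+//	}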
+func (s *Subscription) getNextMsgErr() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.connClosed {
+		return ErrConnectionClosed
+	}
+	return ErrBadSubscription
+}
+
+// processNextMsgDelivered takes a message and applies the needed
+// accounting to the stats from the subscription, returning an
+// error in case the maximum number of messages has already been
+// delivered. It should not be called while holding the lock.
+func (s *Subscription) processNextMsgDelivered(msg *Msg) error {
+	s.mu.Lock()
+	nc := s.conn
+	max := s.max
+
+	var fcReply string
+	// Update some stats.
+	s.delivered++
+	delivered := s.delivered
+	if s.jsi != nil {
+		fcReply = s.checkForFlowControlResponse()
+	}
+
+	if s.typ == SyncSubscription {
+		s.pMsgs--
+		s.pBytes -= len(msg.Data)
+	}
+	s.mu.Unlock()
+
+	if fcReply != _EMPTY_ {
+		nc.Publish(fcReply, nil)
+	}
+
+	if max > 0 {
+		if delivered > max {
+			return ErrMaxMessages
+		}
+		// Remove subscription if we have reached max.
+		if delivered == max {
+			nc.mu.Lock()
+			nc.removeSub(s)
+			nc.mu.Unlock()
+		}
+	}
+	if len(msg.Data) == 0 && msg.Header.Get(statusHdr) == noResponders {
+		return ErrNoResponders
+	}
+
+	return nil
+}
+
+// QueuedMsgs returns the number of queued messages in the client for this subscription.
+// DEPRECATED: Use Pending()
+func (s *Subscription) QueuedMsgs() (int, error) {
+	m, _, err := s.Pending()
+	return int(m), err
+}
+
+// Pending returns the number of queued messages and queued bytes in the client for this subscription.
+func (s *Subscription) Pending() (int, int, error) {
+	if s == nil {
+		return -1, -1, ErrBadSubscription
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.conn == nil || s.closed {
+		return -1, -1, ErrBadSubscription
+	}
+	if s.typ == ChanSubscription {
+		return -1, -1, ErrTypeSubscription
+	}
+	return s.pMsgs, s.pBytes, nil
+}
+
+// MaxPending returns the maximum number of queued messages and queued bytes seen so far.
+func (s *Subscription) MaxPending() (int, int, error) {
+	if s == nil {
+		return -1, -1, ErrBadSubscription
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.conn == nil || s.closed {
+		return -1, -1, ErrBadSubscription
+	}
+	if s.typ == ChanSubscription {
+		return -1, -1, ErrTypeSubscription
+	}
+	return s.pMsgsMax, s.pBytesMax, nil
+}
+
+// ClearMaxPending resets the maximums seen so far.
+func (s *Subscription) ClearMaxPending() error {
+	if s == nil {
+		return ErrBadSubscription
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.conn == nil || s.closed {
+		return ErrBadSubscription
+	}
+	if s.typ == ChanSubscription {
+		return ErrTypeSubscription
+	}
+	s.pMsgsMax, s.pBytesMax = 0, 0
+	return nil
+}
+
+// Pending Limits
+const (
+	// DefaultSubPendingMsgsLimit will be 512k msgs.
+	DefaultSubPendingMsgsLimit = 512 * 1024
+	// DefaultSubPendingBytesLimit is 64MB
+	DefaultSubPendingBytesLimit = 64 * 1024 * 1024
+)
+
+// PendingLimits returns the current limits for this subscription.
+// If no error is returned, a negative value indicates that the
+// given metric is not limited.
+func (s *Subscription) PendingLimits() (int, int, error) {
+	if s == nil {
+		return -1, -1, ErrBadSubscription
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.conn == nil || s.closed {
+		return -1, -1, ErrBadSubscription
+	}
+	if s.typ == ChanSubscription {
+		return -1, -1, ErrTypeSubscription
+	}
+	return s.pMsgsLimit, s.pBytesLimit, nil
+}
+
+// SetPendingLimits sets the limits for pending msgs and bytes for this subscription.
+// Zero is not allowed. Any negative value means that the given metric is not limited.
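+//
+// For example (editorial sketch, hypothetical values): allow up to 1024
+// pending messages while leaving pending bytes unlimited:
+//
+//	sub, _ := nc.Subscribe("metrics", handler) // handler is assumed
+//	if err := sub.SetPendingLimits(1024, -1); err != nil {
+//		log.Println(err)
+//	}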
+func (s *Subscription) SetPendingLimits(msgLimit, bytesLimit int) error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil || s.closed { + return ErrBadSubscription + } + if s.typ == ChanSubscription { + return ErrTypeSubscription + } + if msgLimit == 0 || bytesLimit == 0 { + return ErrInvalidArg + } + s.pMsgsLimit, s.pBytesLimit = msgLimit, bytesLimit + return nil +} + +// Delivered returns the number of delivered messages for this subscription. +func (s *Subscription) Delivered() (int64, error) { + if s == nil { + return -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil || s.closed { + return -1, ErrBadSubscription + } + return int64(s.delivered), nil +} + +// Dropped returns the number of known dropped messages for this subscription. +// This will correspond to messages dropped by violations of PendingLimits. If +// the server declares the connection a SlowConsumer, this number may not be +// valid. +func (s *Subscription) Dropped() (int, error) { + if s == nil { + return -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil || s.closed { + return -1, ErrBadSubscription + } + return s.dropped, nil +} + +// Respond allows a convenient way to respond to requests in service based subscriptions. +func (m *Msg) Respond(data []byte) error { + if m == nil || m.Sub == nil { + return ErrMsgNotBound + } + if m.Reply == "" { + return ErrMsgNoReply + } + m.Sub.mu.Lock() + nc := m.Sub.conn + m.Sub.mu.Unlock() + // No need to check the connection here since the call to publish will do all the checking. + return nc.Publish(m.Reply, data) +} + +// RespondMsg allows a convenient way to respond to requests in service based subscriptions that might include headers +func (m *Msg) RespondMsg(msg *Msg) error { + if m == nil || m.Sub == nil { + return ErrMsgNotBound + } + if m.Reply == "" { + return ErrMsgNoReply + } + msg.Subject = m.Reply + m.Sub.mu.Lock() + nc := m.Sub.conn + m.Sub.mu.Unlock() + // No need to check the connection here since the call to publish will do all the checking. + return nc.PublishMsg(msg) +} + +// FIXME: This is a hack +// removeFlushEntry is needed when we need to discard queued up responses +// for our pings as part of a flush call. This happens when we have a flush +// call outstanding and we call close. +func (nc *Conn) removeFlushEntry(ch chan struct{}) bool { + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.pongs == nil { + return false + } + for i, c := range nc.pongs { + if c == ch { + nc.pongs[i] = nil + return true + } + } + return false +} + +// The lock must be held entering this function. +func (nc *Conn) sendPing(ch chan struct{}) { + nc.pongs = append(nc.pongs, ch) + nc.bw.appendString(pingProto) + // Flush in place. + nc.bw.flush() +} + +// This will fire periodically and send a client origin +// ping to the server. Will also check that we have received +// responses from the server. +func (nc *Conn) processPingTimer() { + nc.mu.Lock() + + if nc.status != CONNECTED { + nc.mu.Unlock() + return + } + + // Check for violation + nc.pout++ + if nc.pout > nc.Opts.MaxPingsOut { + nc.mu.Unlock() + nc.processOpErr(ErrStaleConnection) + return + } + + nc.sendPing(nil) + nc.ptmr.Reset(nc.Opts.PingInterval) + nc.mu.Unlock() +} + +// FlushTimeout allows a Flush operation to have an associated timeout. 
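+//
+// A usage sketch (editorial addition): bound the round trip after a burst
+// of publishes instead of relying on the 10 second default used by Flush():
+//
+//	for i := 0; i < 100; i++ {
+//		nc.Publish("updates", []byte("state"))
+//	}
+//	if err := nc.FlushTimeout(time.Second); err != nil {
+//		log.Printf("server did not acknowledge in time: %v", err)
+//	}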
+func (nc *Conn) FlushTimeout(timeout time.Duration) (err error) { + if nc == nil { + return ErrInvalidConnection + } + if timeout <= 0 { + return ErrBadTimeout + } + + nc.mu.Lock() + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + t := globalTimerPool.Get(timeout) + defer globalTimerPool.Put(t) + + // Create a buffered channel to prevent chan send to block + // in processPong() if this code here times out just when + // PONG was received. + ch := make(chan struct{}, 1) + nc.sendPing(ch) + nc.mu.Unlock() + + select { + case _, ok := <-ch: + if !ok { + err = ErrConnectionClosed + } else { + close(ch) + } + case <-t.C: + err = ErrTimeout + } + + if err != nil { + nc.removeFlushEntry(ch) + } + return +} + +// RTT calculates the round trip time between this client and the server. +func (nc *Conn) RTT() (time.Duration, error) { + if nc.IsClosed() { + return 0, ErrConnectionClosed + } + if nc.IsReconnecting() { + return 0, ErrDisconnected + } + start := time.Now() + if err := nc.FlushTimeout(10 * time.Second); err != nil { + return 0, err + } + return time.Since(start), nil +} + +// Flush will perform a round trip to the server and return when it +// receives the internal reply. +func (nc *Conn) Flush() error { + return nc.FlushTimeout(10 * time.Second) +} + +// Buffered will return the number of bytes buffered to be sent to the server. +// FIXME(dlc) take into account disconnected state. +func (nc *Conn) Buffered() (int, error) { + nc.mu.RLock() + defer nc.mu.RUnlock() + if nc.isClosed() || nc.bw == nil { + return -1, ErrConnectionClosed + } + return nc.bw.buffered(), nil +} + +// resendSubscriptions will send our subscription state back to the +// server. Used in reconnects +func (nc *Conn) resendSubscriptions() { + // Since we are going to send protocols to the server, we don't want to + // be holding the subsMu lock (which is used in processMsg). So copy + // the subscriptions in a temporary array. + nc.subsMu.RLock() + subs := make([]*Subscription, 0, len(nc.subs)) + for _, s := range nc.subs { + subs = append(subs, s) + } + nc.subsMu.RUnlock() + for _, s := range subs { + adjustedMax := uint64(0) + s.mu.Lock() + if s.max > 0 { + if s.delivered < s.max { + adjustedMax = s.max - s.delivered + } + // adjustedMax could be 0 here if the number of delivered msgs + // reached the max, if so unsubscribe. + if adjustedMax == 0 { + s.mu.Unlock() + nc.bw.writeDirect(fmt.Sprintf(unsubProto, s.sid, _EMPTY_)) + continue + } + } + subj, queue, sid := s.Subject, s.Queue, s.sid + s.mu.Unlock() + + nc.bw.writeDirect(fmt.Sprintf(subProto, subj, queue, sid)) + if adjustedMax > 0 { + maxStr := strconv.Itoa(int(adjustedMax)) + nc.bw.writeDirect(fmt.Sprintf(unsubProto, sid, maxStr)) + } + } +} + +// This will clear any pending flush calls and release pending calls. +// Lock is assumed to be held by the caller. +func (nc *Conn) clearPendingFlushCalls() { + // Clear any queued pongs, e.g. pending flush calls. + for _, ch := range nc.pongs { + if ch != nil { + close(ch) + } + } + nc.pongs = nil +} + +// This will clear any pending Request calls. +// Lock is assumed to be held by the caller. +func (nc *Conn) clearPendingRequestCalls() { + if nc.respMap == nil { + return + } + for key, ch := range nc.respMap { + if ch != nil { + close(ch) + delete(nc.respMap, key) + } + } +} + +// Low level close call that will do correct cleanup and set +// desired status. Also controls whether user defined callbacks +// will be triggered. The lock should not be held entering this +// function. 
This function will handle the locking manually. +func (nc *Conn) close(status Status, doCBs bool, err error) { + nc.mu.Lock() + if nc.isClosed() { + nc.status = status + nc.mu.Unlock() + return + } + nc.status = CLOSED + + // Kick the Go routines so they fall out. + nc.kickFlusher() + + // If the reconnect timer is waiting between a reconnect attempt, + // this will kick it out. + if nc.rqch != nil { + close(nc.rqch) + nc.rqch = nil + } + + // Clear any queued pongs, e.g. pending flush calls. + nc.clearPendingFlushCalls() + + // Clear any queued and blocking Requests. + nc.clearPendingRequestCalls() + + // Stop ping timer if set. + nc.stopPingTimer() + nc.ptmr = nil + + // Need to close and set TCP conn to nil if reconnect loop has stopped, + // otherwise we would incorrectly invoke Disconnect handler (if set) + // down below. + if nc.ar && nc.conn != nil { + nc.conn.Close() + nc.conn = nil + } else if nc.conn != nil { + // Go ahead and make sure we have flushed the outbound + nc.bw.flush() + defer nc.conn.Close() + } + + // Close sync subscriber channels and release any + // pending NextMsg() calls. + nc.subsMu.Lock() + for _, s := range nc.subs { + s.mu.Lock() + + // Release callers on NextMsg for SyncSubscription only + if s.mch != nil && s.typ == SyncSubscription { + close(s.mch) + } + s.mch = nil + // Mark as invalid, for signaling to waitForMsgs + s.closed = true + // Mark connection closed in subscription + s.connClosed = true + // If we have an async subscription, signals it to exit + if s.typ == AsyncSubscription && s.pCond != nil { + s.pCond.Signal() + } + + s.mu.Unlock() + } + nc.subs = nil + nc.subsMu.Unlock() + + nc.changeConnStatus(status) + + // Perform appropriate callback if needed for a disconnect. + if doCBs { + if nc.conn != nil { + if disconnectedErrCB := nc.Opts.DisconnectedErrCB; disconnectedErrCB != nil { + nc.ach.push(func() { disconnectedErrCB(nc, err) }) + } else if disconnectedCB := nc.Opts.DisconnectedCB; disconnectedCB != nil { + nc.ach.push(func() { disconnectedCB(nc) }) + } + } + if nc.Opts.ClosedCB != nil { + nc.ach.push(func() { nc.Opts.ClosedCB(nc) }) + } + } + // If this is terminal, then we have to notify the asyncCB handler that + // it can exit once all async callbacks have been dispatched. + if status == CLOSED { + nc.ach.close() + } + nc.mu.Unlock() +} + +// Close will close the connection to the server. This call will release +// all blocking calls, such as Flush() and NextMsg() +func (nc *Conn) Close() { + if nc != nil { + // This will be a no-op if the connection was not websocket. + // We do this here as opposed to inside close() because we want + // to do this only for the final user-driven close of the client. + // Otherwise, we would need to change close() to pass a boolean + // indicating that this is the case. + nc.wsClose() + nc.close(CLOSED, !nc.Opts.NoCallbacksAfterClientClose, nil) + } +} + +// IsClosed tests if a Conn has been closed. +func (nc *Conn) IsClosed() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.isClosed() +} + +// IsReconnecting tests if a Conn is reconnecting. +func (nc *Conn) IsReconnecting() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.isReconnecting() +} + +// IsConnected tests if a Conn is connected. +func (nc *Conn) IsConnected() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.isConnected() +} + +// drainConnection will run in a separate Go routine and will +// flush all publishes and drain all active subscriptions. +func (nc *Conn) drainConnection() { + // Snapshot subs list. 
+ nc.mu.Lock() + + // Check again here if we are in a state to not process. + if nc.isClosed() { + nc.mu.Unlock() + return + } + if nc.isConnecting() || nc.isReconnecting() { + nc.mu.Unlock() + // Move to closed state. + nc.Close() + return + } + + subs := make([]*Subscription, 0, len(nc.subs)) + for _, s := range nc.subs { + if s == nc.respMux { + // Skip since might be in use while messages + // are being processed (can miss responses). + continue + } + subs = append(subs, s) + } + errCB := nc.Opts.AsyncErrorCB + drainWait := nc.Opts.DrainTimeout + respMux := nc.respMux + nc.mu.Unlock() + + // for pushing errors with context. + pushErr := func(err error) { + nc.mu.Lock() + nc.err = err + if errCB != nil { + nc.ach.push(func() { errCB(nc, nil, err) }) + } + nc.mu.Unlock() + } + + // Do subs first, skip request handler if present. + for _, s := range subs { + if err := s.Drain(); err != nil { + // We will notify about these but continue. + pushErr(err) + } + } + + // Wait for the subscriptions to drop to zero. + timeout := time.Now().Add(drainWait) + var min int + if respMux != nil { + min = 1 + } else { + min = 0 + } + for time.Now().Before(timeout) { + if nc.NumSubscriptions() == min { + break + } + time.Sleep(10 * time.Millisecond) + } + + // In case there was a request/response handler + // then need to call drain at the end. + if respMux != nil { + if err := respMux.Drain(); err != nil { + // We will notify about these but continue. + pushErr(err) + } + for time.Now().Before(timeout) { + if nc.NumSubscriptions() == 0 { + break + } + time.Sleep(10 * time.Millisecond) + } + } + + // Check if we timed out. + if nc.NumSubscriptions() != 0 { + pushErr(ErrDrainTimeout) + } + + // Flip State + nc.mu.Lock() + nc.changeConnStatus(DRAINING_PUBS) + nc.mu.Unlock() + + // Do publish drain via Flush() call. + err := nc.FlushTimeout(5 * time.Second) + if err != nil { + pushErr(err) + } + + // Move to closed state. + nc.Close() +} + +// Drain will put a connection into a drain state. All subscriptions will +// immediately be put into a drain state. Upon completion, the publishers +// will be drained and can not publish any additional messages. Upon draining +// of the publishers, the connection will be closed. Use the ClosedCB() +// option to know when the connection has moved from draining to closed. +// +// See note in Subscription.Drain for JetStream subscriptions. +func (nc *Conn) Drain() error { + nc.mu.Lock() + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + if nc.isConnecting() || nc.isReconnecting() { + nc.mu.Unlock() + nc.Close() + return ErrConnectionReconnecting + } + if nc.isDraining() { + nc.mu.Unlock() + return nil + } + nc.changeConnStatus(DRAINING_SUBS) + go nc.drainConnection() + nc.mu.Unlock() + + return nil +} + +// IsDraining tests if a Conn is in the draining state. +func (nc *Conn) IsDraining() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.isDraining() +} + +// caller must lock +func (nc *Conn) getServers(implicitOnly bool) []string { + poolSize := len(nc.srvPool) + var servers = make([]string, 0) + for i := 0; i < poolSize; i++ { + if implicitOnly && !nc.srvPool[i].isImplicit { + continue + } + url := nc.srvPool[i].url + servers = append(servers, fmt.Sprintf("%s://%s", url.Scheme, url.Host)) + } + return servers +} + +// Servers returns the list of known server urls, including additional +// servers discovered after a connection has been established. If +// authentication is enabled, use UserInfo or Token when connecting with +// these urls. 
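+//
+// Editorial sketch: after connecting, inspect both the configured and the
+// gossiped members of the pool (address is hypothetical):
+//
+//	nc, _ := nats.Connect("nats://127.0.0.1:4222")
+//	for _, u := range nc.Servers() {
+//		fmt.Println("known server:", u)
+//	}
+//	for _, u := range nc.DiscoveredServers() {
+//		fmt.Println("discovered server:", u)
+//	}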
+func (nc *Conn) Servers() []string { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.getServers(false) +} + +// DiscoveredServers returns only the server urls that have been discovered +// after a connection has been established. If authentication is enabled, +// use UserInfo or Token when connecting with these urls. +func (nc *Conn) DiscoveredServers() []string { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.getServers(true) +} + +// Status returns the current state of the connection. +func (nc *Conn) Status() Status { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.status +} + +// Test if Conn has been closed Lock is assumed held. +func (nc *Conn) isClosed() bool { + return nc.status == CLOSED +} + +// Test if Conn is in the process of connecting +func (nc *Conn) isConnecting() bool { + return nc.status == CONNECTING +} + +// Test if Conn is being reconnected. +func (nc *Conn) isReconnecting() bool { + return nc.status == RECONNECTING +} + +// Test if Conn is connected or connecting. +func (nc *Conn) isConnected() bool { + return nc.status == CONNECTED || nc.isDraining() +} + +// Test if Conn is in the draining state. +func (nc *Conn) isDraining() bool { + return nc.status == DRAINING_SUBS || nc.status == DRAINING_PUBS +} + +// Test if Conn is in the draining state for pubs. +func (nc *Conn) isDrainingPubs() bool { + return nc.status == DRAINING_PUBS +} + +// Stats will return a race safe copy of the Statistics section for the connection. +func (nc *Conn) Stats() Statistics { + // Stats are updated either under connection's mu or with atomic operations + // for inbound stats in processMsg(). + nc.mu.Lock() + stats := Statistics{ + InMsgs: atomic.LoadUint64(&nc.InMsgs), + InBytes: atomic.LoadUint64(&nc.InBytes), + OutMsgs: nc.OutMsgs, + OutBytes: nc.OutBytes, + Reconnects: nc.Reconnects, + } + nc.mu.Unlock() + return stats +} + +// MaxPayload returns the size limit that a message payload can have. +// This is set by the server configuration and delivered to the client +// upon connect. +func (nc *Conn) MaxPayload() int64 { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.info.MaxPayload +} + +// HeadersSupported will return if the server supports headers +func (nc *Conn) HeadersSupported() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.info.Headers +} + +// AuthRequired will return if the connected server requires authorization. +func (nc *Conn) AuthRequired() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.info.AuthRequired +} + +// TLSRequired will return if the connected server requires TLS connections. +func (nc *Conn) TLSRequired() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.info.TLSRequired +} + +// Barrier schedules the given function `f` to all registered asynchronous +// subscriptions. +// Only the last subscription to see this barrier will invoke the function. +// If no subscription is registered at the time of this call, `f()` is invoked +// right away. +// ErrConnectionClosed is returned if the connection is closed prior to +// the call. 
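+//
+// A sketch of the fencing pattern this enables (editorial addition;
+// process is a hypothetical handler):
+//
+//	nc.Subscribe("events", func(m *nats.Msg) { process(m) })
+//	done := make(chan struct{})
+//	if err := nc.Barrier(func() { close(done) }); err != nil {
+//		log.Fatal(err)
+//	}
+//	<-done // every message delivered before the barrier has been handled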
+func (nc *Conn) Barrier(f func()) error { + nc.mu.Lock() + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + nc.subsMu.Lock() + // Need to figure out how many non chan subscriptions there are + numSubs := 0 + for _, sub := range nc.subs { + if sub.typ == AsyncSubscription { + numSubs++ + } + } + if numSubs == 0 { + nc.subsMu.Unlock() + nc.mu.Unlock() + f() + return nil + } + barrier := &barrierInfo{refs: int64(numSubs), f: f} + for _, sub := range nc.subs { + sub.mu.Lock() + if sub.mch == nil { + msg := &Msg{barrier: barrier} + // Push onto the async pList + if sub.pTail != nil { + sub.pTail.next = msg + } else { + sub.pHead = msg + sub.pCond.Signal() + } + sub.pTail = msg + } + sub.mu.Unlock() + } + nc.subsMu.Unlock() + nc.mu.Unlock() + return nil +} + +// GetClientIP returns the client IP as known by the server. +// Supported as of server version 2.1.6. +func (nc *Conn) GetClientIP() (net.IP, error) { + nc.mu.RLock() + defer nc.mu.RUnlock() + if nc.isClosed() { + return nil, ErrConnectionClosed + } + if nc.info.ClientIP == "" { + return nil, ErrClientIPNotSupported + } + ip := net.ParseIP(nc.info.ClientIP) + return ip, nil +} + +// GetClientID returns the client ID assigned by the server to which +// the client is currently connected to. Note that the value may change if +// the client reconnects. +// This function returns ErrClientIDNotSupported if the server is of a +// version prior to 1.2.0. +func (nc *Conn) GetClientID() (uint64, error) { + nc.mu.RLock() + defer nc.mu.RUnlock() + if nc.isClosed() { + return 0, ErrConnectionClosed + } + if nc.info.CID == 0 { + return 0, ErrClientIDNotSupported + } + return nc.info.CID, nil +} + +// StatusChanged returns a channel on which given list of connection status changes will be reported. +// If no statuses are provided, defaults will be used: CONNECTED, RECONNECTING, DISCONNECTED, CLOSED. +func (nc *Conn) StatusChanged(statuses ...Status) chan Status { + if len(statuses) == 0 { + statuses = []Status{CONNECTED, RECONNECTING, DISCONNECTED, CLOSED} + } + ch := make(chan Status, 10) + for _, s := range statuses { + nc.registerStatusChangeListener(s, ch) + } + return ch +} + +// registerStatusChangeListener registers a channel waiting for a specific status change event. +// Status change events are non-blocking - if no receiver is waiting for the status change, +// it will not be sent on the channel. Closed channels are ignored. +func (nc *Conn) registerStatusChangeListener(status Status, ch chan Status) { + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.statListeners == nil { + nc.statListeners = make(map[Status][]chan Status) + } + if _, ok := nc.statListeners[status]; !ok { + nc.statListeners[status] = make([]chan Status, 0) + } + nc.statListeners[status] = append(nc.statListeners[status], ch) +} + +// sendStatusEvent sends connection status event to all channels. +// If channel is closed, or there is no listener, sendStatusEvent +// will not block. Lock should be held entering. 
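+//
+// For context, an editorial sketch of the consumer side registered via
+// StatusChanged() above:
+//
+//	ch := nc.StatusChanged(nats.RECONNECTING, nats.CLOSED)
+//	go func() {
+//		for s := range ch {
+//			log.Println("connection status changed:", s)
+//		}
+//	}()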
+func (nc *Conn) sendStatusEvent(s Status) { +Loop: + for i := 0; i < len(nc.statListeners[s]); i++ { + // make sure channel is not closed + select { + case <-nc.statListeners[s][i]: + // if chan is closed, remove it + nc.statListeners[s][i] = nc.statListeners[s][len(nc.statListeners[s])-1] + nc.statListeners[s] = nc.statListeners[s][:len(nc.statListeners[s])-1] + i-- + continue Loop + default: + } + // only send event if someone's listening + select { + case nc.statListeners[s][i] <- s: + default: + } + } +} + +// changeConnStatus changes connections status and sends events +// to all listeners. Lock should be held entering. +func (nc *Conn) changeConnStatus(status Status) { + if nc == nil { + return + } + nc.sendStatusEvent(status) + nc.status = status +} + +// NkeyOptionFromSeed will load an nkey pair from a seed file. +// It will return the NKey Option and will handle +// signing of nonce challenges from the server. It will take +// care to not hold keys in memory and to wipe memory. +func NkeyOptionFromSeed(seedFile string) (Option, error) { + kp, err := nkeyPairFromSeedFile(seedFile) + if err != nil { + return nil, err + } + // Wipe our key on exit. + defer kp.Wipe() + + pub, err := kp.PublicKey() + if err != nil { + return nil, err + } + if !nkeys.IsValidPublicUserKey(pub) { + return nil, fmt.Errorf("nats: Not a valid nkey user seed") + } + sigCB := func(nonce []byte) ([]byte, error) { + return sigHandler(nonce, seedFile) + } + return Nkey(string(pub), sigCB), nil +} + +// Just wipe slice with 'x', for clearing contents of creds or nkey seed file. +func wipeSlice(buf []byte) { + for i := range buf { + buf[i] = 'x' + } +} + +func userFromFile(userFile string) (string, error) { + path, err := expandPath(userFile) + if err != nil { + return _EMPTY_, fmt.Errorf("nats: %w", err) + } + + contents, err := os.ReadFile(path) + if err != nil { + return _EMPTY_, fmt.Errorf("nats: %w", err) + } + defer wipeSlice(contents) + return nkeys.ParseDecoratedJWT(contents) +} + +func homeDir() (string, error) { + if runtime.GOOS == "windows" { + homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH") + userProfile := os.Getenv("USERPROFILE") + + var home string + if homeDrive == "" || homePath == "" { + if userProfile == "" { + return _EMPTY_, errors.New("nats: failed to get home dir, require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%") + } + home = userProfile + } else { + home = filepath.Join(homeDrive, homePath) + } + + return home, nil + } + + home := os.Getenv("HOME") + if home == "" { + return _EMPTY_, errors.New("nats: failed to get home dir, require $HOME") + } + return home, nil +} + +func expandPath(p string) (string, error) { + p = os.ExpandEnv(p) + + if !strings.HasPrefix(p, "~") { + return p, nil + } + + home, err := homeDir() + if err != nil { + return _EMPTY_, err + } + + return filepath.Join(home, p[1:]), nil +} + +func nkeyPairFromSeedFile(seedFile string) (nkeys.KeyPair, error) { + contents, err := os.ReadFile(seedFile) + if err != nil { + return nil, fmt.Errorf("nats: %w", err) + } + defer wipeSlice(contents) + return nkeys.ParseDecoratedNKey(contents) +} + +// Sign authentication challenges from the server. +// Do not keep private seed in memory. +func sigHandler(nonce []byte, seedFile string) ([]byte, error) { + kp, err := nkeyPairFromSeedFile(seedFile) + if err != nil { + return nil, fmt.Errorf("unable to extract key pair from file %q: %w", seedFile, err) + } + // Wipe our key on exit. 
+ defer kp.Wipe() + + sig, _ := kp.Sign(nonce) + return sig, nil +} + +type timeoutWriter struct { + timeout time.Duration + conn net.Conn + err error +} + +// Write implements the io.Writer interface. +func (tw *timeoutWriter) Write(p []byte) (int, error) { + if tw.err != nil { + return 0, tw.err + } + + var n int + tw.conn.SetWriteDeadline(time.Now().Add(tw.timeout)) + n, tw.err = tw.conn.Write(p) + tw.conn.SetWriteDeadline(time.Time{}) + return n, tw.err +} diff --git a/vendor/github.com/nats-io/nats.go/netchan.go b/vendor/github.com/nats-io/nats.go/netchan.go new file mode 100644 index 00000000..060721eb --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/netchan.go @@ -0,0 +1,111 @@ +// Copyright 2013-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "errors" + "reflect" +) + +// This allows the functionality for network channels by binding send and receive Go chans +// to subjects and optionally queue groups. +// Data will be encoded and decoded via the EncodedConn and its associated encoders. + +// BindSendChan binds a channel for send operations to NATS. +func (c *EncodedConn) BindSendChan(subject string, channel any) error { + chVal := reflect.ValueOf(channel) + if chVal.Kind() != reflect.Chan { + return ErrChanArg + } + go chPublish(c, chVal, subject) + return nil +} + +// Publish all values that arrive on the channel until it is closed or we +// encounter an error. +func chPublish(c *EncodedConn, chVal reflect.Value, subject string) { + for { + val, ok := chVal.Recv() + if !ok { + // Channel has most likely been closed. + return + } + if e := c.Publish(subject, val.Interface()); e != nil { + // Do this under lock. + c.Conn.mu.Lock() + defer c.Conn.mu.Unlock() + + if c.Conn.Opts.AsyncErrorCB != nil { + // FIXME(dlc) - Not sure this is the right thing to do. + // FIXME(ivan) - If the connection is not yet closed, try to schedule the callback + if c.Conn.isClosed() { + go c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) + } else { + c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) }) + } + } + return + } + } +} + +// BindRecvChan binds a channel for receive operations from NATS. +func (c *EncodedConn) BindRecvChan(subject string, channel any) (*Subscription, error) { + return c.bindRecvChan(subject, _EMPTY_, channel) +} + +// BindRecvQueueChan binds a channel for queue-based receive operations from NATS. +func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel any) (*Subscription, error) { + return c.bindRecvChan(subject, queue, channel) +} + +// Internal function to bind receive operations for a channel. 
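+//
+// Editorial sketch of the public channel-binding API implemented here
+// (assumes ec is an *EncodedConn created with, e.g., the JSON encoder):
+//
+//	send := make(chan string)
+//	recv := make(chan string, 64)
+//	ec.BindSendChan("greetings", send)
+//	ec.BindRecvChan("greetings", recv)
+//	send <- "hello"
+//	fmt.Println(<-recv) // "hello"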
+func (c *EncodedConn) bindRecvChan(subject, queue string, channel any) (*Subscription, error) {
+	chVal := reflect.ValueOf(channel)
+	if chVal.Kind() != reflect.Chan {
+		return nil, ErrChanArg
+	}
+	argType := chVal.Type().Elem()
+
+	cb := func(m *Msg) {
+		var oPtr reflect.Value
+		if argType.Kind() != reflect.Ptr {
+			oPtr = reflect.New(argType)
+		} else {
+			oPtr = reflect.New(argType.Elem())
+		}
+		if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil {
+			c.Conn.err = errors.New("nats: Got an error trying to unmarshal: " + err.Error())
+			if c.Conn.Opts.AsyncErrorCB != nil {
+				c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, c.Conn.err) })
+			}
+			return
+		}
+		if argType.Kind() != reflect.Ptr {
+			oPtr = reflect.Indirect(oPtr)
+		}
+		// This is a bit hacky, but in this instance we may be trying to send to a closed channel,
+		// and the user does not know when it is safe to close the channel.
+		defer func() {
+			// If we have panicked, recover and close the subscription.
+			if r := recover(); r != nil {
+				m.Sub.Unsubscribe()
+			}
+		}()
+		// Actually do the send to the channel.
+		chVal.Send(oPtr)
+	}
+
+	return c.Conn.subscribe(subject, queue, cb, nil, false, nil)
+}
diff --git a/vendor/github.com/nats-io/nats.go/object.go b/vendor/github.com/nats-io/nats.go/object.go
new file mode 100644
index 00000000..f6ba8fb1
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/object.go
@@ -0,0 +1,1386 @@
+// Copyright 2021-2022 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nats
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"net"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/nats-io/nats.go/internal/parser"
+	"github.com/nats-io/nuid"
+)
+
+// ObjectStoreManager creates, loads and deletes Object Stores
+type ObjectStoreManager interface {
+	// ObjectStore will look up and bind to an existing object store instance.
+	ObjectStore(bucket string) (ObjectStore, error)
+	// CreateObjectStore will create an object store.
+	CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error)
+	// DeleteObjectStore will delete the underlying stream for the named object.
+	DeleteObjectStore(bucket string) error
+	// ObjectStoreNames is used to retrieve a list of bucket names
+	ObjectStoreNames(opts ...ObjectOpt) <-chan string
+	// ObjectStores is used to retrieve a list of bucket statuses
+	ObjectStores(opts ...ObjectOpt) <-chan ObjectStoreStatus
+}
+
+// ObjectStore is a blob store capable of storing large objects efficiently in
+// JetStream streams
+type ObjectStore interface {
+	// Put will place the contents from the reader into a new object.
+	Put(obj *ObjectMeta, reader io.Reader, opts ...ObjectOpt) (*ObjectInfo, error)
+	// Get will pull the named object from the object store.
+	Get(name string, opts ...GetObjectOpt) (ObjectResult, error)
+
+	// PutBytes is a convenience function to put a byte slice into this object store.
+	PutBytes(name string, data []byte, opts ...ObjectOpt) (*ObjectInfo, error)
+	// GetBytes is a convenience function to pull an object from this object store and return it as a byte slice.
+	GetBytes(name string, opts ...GetObjectOpt) ([]byte, error)
+
+	// PutString is a convenience function to put a string into this object store.
+	PutString(name string, data string, opts ...ObjectOpt) (*ObjectInfo, error)
+	// GetString is a convenience function to pull an object from this object store and return it as a string.
+	GetString(name string, opts ...GetObjectOpt) (string, error)
+
+	// PutFile is a convenience function to put a file into this object store.
+	PutFile(file string, opts ...ObjectOpt) (*ObjectInfo, error)
+	// GetFile is a convenience function to pull an object from this object store and place it in a file.
+	GetFile(name, file string, opts ...GetObjectOpt) error
+
+	// GetInfo will retrieve the current information for the object.
+	GetInfo(name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error)
+	// UpdateMeta will update the metadata for the object.
+	UpdateMeta(name string, meta *ObjectMeta) error
+
+	// Delete will delete the named object.
+	Delete(name string) error
+
+	// AddLink will add a link to another object.
+	AddLink(name string, obj *ObjectInfo) (*ObjectInfo, error)
+
+	// AddBucketLink will add a link to another object store.
+	AddBucketLink(name string, bucket ObjectStore) (*ObjectInfo, error)
+
+	// Seal will seal the object store, no further modifications will be allowed.
+	Seal() error
+
+	// Watch for changes in the underlying store and receive meta information updates.
+	Watch(opts ...WatchOpt) (ObjectWatcher, error)
+
+	// List will list all the objects in this store.
+	List(opts ...ListObjectsOpt) ([]*ObjectInfo, error)
+
+	// Status retrieves run-time status about the backing store of the bucket.
+	Status() (ObjectStoreStatus, error)
+}
+
+type ObjectOpt interface {
+	configureObject(opts *objOpts) error
+}
+
+type objOpts struct {
+	ctx context.Context
+}
+
+// For nats.Context() support.
+func (ctx ContextOpt) configureObject(opts *objOpts) error {
+	opts.ctx = ctx
+	return nil
+}
+
+// ObjectWatcher is what is returned when doing a watch.
+type ObjectWatcher interface {
+	// Updates returns a channel to read any updates to entries.
+	Updates() <-chan *ObjectInfo
+	// Stop will stop this watcher.
+ Stop() error +} + +var ( + ErrObjectConfigRequired = errors.New("nats: object-store config required") + ErrBadObjectMeta = errors.New("nats: object-store meta information invalid") + ErrObjectNotFound = errors.New("nats: object not found") + ErrInvalidStoreName = errors.New("nats: invalid object-store name") + ErrDigestMismatch = errors.New("nats: received a corrupt object, digests do not match") + ErrInvalidDigestFormat = errors.New("nats: object digest hash has invalid format") + ErrNoObjectsFound = errors.New("nats: no objects found") + ErrObjectAlreadyExists = errors.New("nats: an object already exists with that name") + ErrNameRequired = errors.New("nats: name is required") + ErrNeeds262 = errors.New("nats: object-store requires at least server version 2.6.2") + ErrLinkNotAllowed = errors.New("nats: link cannot be set when putting the object in bucket") + ErrObjectRequired = errors.New("nats: object required") + ErrNoLinkToDeleted = errors.New("nats: not allowed to link to a deleted object") + ErrNoLinkToLink = errors.New("nats: not allowed to link to another link") + ErrCantGetBucket = errors.New("nats: invalid Get, object is a link to a bucket") + ErrBucketRequired = errors.New("nats: bucket required") + ErrBucketMalformed = errors.New("nats: bucket malformed") + ErrUpdateMetaDeleted = errors.New("nats: cannot update meta for a deleted object") +) + +// ObjectStoreConfig is the config for the object store. +type ObjectStoreConfig struct { + Bucket string `json:"bucket"` + Description string `json:"description,omitempty"` + TTL time.Duration `json:"max_age,omitempty"` + MaxBytes int64 `json:"max_bytes,omitempty"` + Storage StorageType `json:"storage,omitempty"` + Replicas int `json:"num_replicas,omitempty"` + Placement *Placement `json:"placement,omitempty"` + + // Bucket-specific metadata + // NOTE: Metadata requires nats-server v2.10.0+ + Metadata map[string]string `json:"metadata,omitempty"` +} + +type ObjectStoreStatus interface { + // Bucket is the name of the bucket + Bucket() string + // Description is the description supplied when creating the bucket + Description() string + // TTL indicates how long objects are kept in the bucket + TTL() time.Duration + // Storage indicates the underlying JetStream storage technology used to store data + Storage() StorageType + // Replicas indicates how many storage replicas are kept for the data in the bucket + Replicas() int + // Sealed indicates the stream is sealed and cannot be modified in any way + Sealed() bool + // Size is the combined size of all data in the bucket including metadata, in bytes + Size() uint64 + // BackingStore provides details about the underlying storage + BackingStore() string + // Metadata is the user supplied metadata for the bucket + Metadata() map[string]string +} + +// ObjectMetaOptions +type ObjectMetaOptions struct { + Link *ObjectLink `json:"link,omitempty"` + ChunkSize uint32 `json:"max_chunk_size,omitempty"` +} + +// ObjectMeta is high level information about an object. +type ObjectMeta struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + Headers Header `json:"headers,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + + // Optional options. + Opts *ObjectMetaOptions `json:"options,omitempty"` +} + +// ObjectInfo is meta plus instance information. 
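+//
+// An abridged, illustrative JSON encoding of this struct (all field values
+// below are made up for the example):
+//
+//	{"name":"build.tgz","bucket":"artifacts","nuid":"hgG6vKZq...",
+//	 "size":1048576,"chunks":8,"digest":"SHA-256=...","mtime":"2024-09-20T10:07:21Z"}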
+type ObjectInfo struct {
+	ObjectMeta
+	Bucket  string    `json:"bucket"`
+	NUID    string    `json:"nuid"`
+	Size    uint64    `json:"size"`
+	ModTime time.Time `json:"mtime"`
+	Chunks  uint32    `json:"chunks"`
+	Digest  string    `json:"digest,omitempty"`
+	Deleted bool      `json:"deleted,omitempty"`
+}
+
+// ObjectLink is used to embed links to other buckets and objects.
+type ObjectLink struct {
+	// Bucket is the name of the other object store.
+	Bucket string `json:"bucket"`
+	// Name can be used to link to a single object.
+	// If empty, this is a link to the whole store, like a directory.
+	Name string `json:"name,omitempty"`
+}
+
+// ObjectResult will return the underlying stream info and also be an io.ReadCloser.
+type ObjectResult interface {
+	io.ReadCloser
+	Info() (*ObjectInfo, error)
+	Error() error
+}
+
+const (
+	objNameTmpl         = "OBJ_%s"     // OBJ_<bucket> // stream name
+	objAllChunksPreTmpl = "$O.%s.C.>"  // $O.<bucket>.C.> // chunk stream subject
+	objAllMetaPreTmpl   = "$O.%s.M.>"  // $O.<bucket>.M.> // meta stream subject
+	objChunksPreTmpl    = "$O.%s.C.%s" // $O.<bucket>.C.<object-nuid> // chunk message subject
+	objMetaPreTmpl      = "$O.%s.M.%s" // $O.<bucket>.M.<name-encoded> // meta message subject
+	objNoPending        = "0"
+	objDefaultChunkSize = uint32(128 * 1024) // 128k
+	objDigestType       = "SHA-256="
+	objDigestTmpl       = objDigestType + "%s"
+)
+
+type obs struct {
+	name   string
+	stream string
+	js     *js
+}
+
+// CreateObjectStore will create an object store.
+func (js *js) CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error) {
+	if !js.nc.serverMinVersion(2, 6, 2) {
+		return nil, ErrNeeds262
+	}
+	if cfg == nil {
+		return nil, ErrObjectConfigRequired
+	}
+	if !validBucketRe.MatchString(cfg.Bucket) {
+		return nil, ErrInvalidStoreName
+	}
+
+	name := cfg.Bucket
+	chunks := fmt.Sprintf(objAllChunksPreTmpl, name)
+	meta := fmt.Sprintf(objAllMetaPreTmpl, name)
+
+	// We will explicitly set some values so that we can do a comparison
+	// if we get an "already in use" error and need to check if it is the same.
+	// See kv
+	replicas := cfg.Replicas
+	if replicas == 0 {
+		replicas = 1
+	}
+	maxBytes := cfg.MaxBytes
+	if maxBytes == 0 {
+		maxBytes = -1
+	}
+
+	scfg := &StreamConfig{
+		Name:        fmt.Sprintf(objNameTmpl, name),
+		Description: cfg.Description,
+		Subjects:    []string{chunks, meta},
+		MaxAge:      cfg.TTL,
+		MaxBytes:    maxBytes,
+		Storage:     cfg.Storage,
+		Replicas:    replicas,
+		Placement:   cfg.Placement,
+		Discard:     DiscardNew,
+		AllowRollup: true,
+		AllowDirect: true,
+		Metadata:    cfg.Metadata,
+	}
+
+	// Create our stream.
+	_, err := js.AddStream(scfg)
+	if err != nil {
+		return nil, err
+	}
+
+	return &obs{name: name, stream: scfg.Name, js: js}, nil
+}
+
+// ObjectStore will look up and bind to an existing object store instance.
+func (js *js) ObjectStore(bucket string) (ObjectStore, error) {
+	if !validBucketRe.MatchString(bucket) {
+		return nil, ErrInvalidStoreName
+	}
+	if !js.nc.serverMinVersion(2, 6, 2) {
+		return nil, ErrNeeds262
+	}
+
+	stream := fmt.Sprintf(objNameTmpl, bucket)
+	si, err := js.StreamInfo(stream)
+	if err != nil {
+		return nil, err
+	}
+	return &obs{name: bucket, stream: si.Config.Name, js: js}, nil
+}
+
+// DeleteObjectStore will delete the underlying stream for the named object.
+func (js *js) DeleteObjectStore(bucket string) error {
+	stream := fmt.Sprintf(objNameTmpl, bucket)
+	return js.DeleteStream(stream)
+}
+
+func encodeName(name string) string {
+	return base64.URLEncoding.EncodeToString([]byte(name))
+}
+
+// Put will place the contents from the reader into this object-store.
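+//
+// A minimal usage sketch ("store", the file name, and the error handling are
+// illustrative assumptions):
+//
+//	f, err := os.Open("payload.bin")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer f.Close()
+//	info, err := store.Put(&nats.ObjectMeta{Name: "payload.bin"}, f)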
+func (obs *obs) Put(meta *ObjectMeta, r io.Reader, opts ...ObjectOpt) (*ObjectInfo, error) { + if meta == nil || meta.Name == "" { + return nil, ErrBadObjectMeta + } + + if meta.Opts == nil { + meta.Opts = &ObjectMetaOptions{ChunkSize: objDefaultChunkSize} + } else if meta.Opts.Link != nil { + return nil, ErrLinkNotAllowed + } else if meta.Opts.ChunkSize == 0 { + meta.Opts.ChunkSize = objDefaultChunkSize + } + + var o objOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureObject(&o); err != nil { + return nil, err + } + } + } + ctx := o.ctx + + // Create the new nuid so chunks go on a new subject if the name is re-used + newnuid := nuid.Next() + + // These will be used in more than one place + chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, newnuid) + + // Grab existing meta info (einfo). Ok to be found or not found, any other error is a problem + // Chunks on the old nuid can be cleaned up at the end + einfo, err := obs.GetInfo(meta.Name, GetObjectInfoShowDeleted()) // GetInfo will encode the name + if err != nil && err != ErrObjectNotFound { + return nil, err + } + + // For async error handling + var perr error + var mu sync.Mutex + setErr := func(err error) { + mu.Lock() + defer mu.Unlock() + perr = err + } + getErr := func() error { + mu.Lock() + defer mu.Unlock() + return perr + } + + // Create our own JS context to handle errors etc. + jetStream, err := obs.js.nc.JetStream(PublishAsyncErrHandler(func(js JetStream, _ *Msg, err error) { setErr(err) })) + if err != nil { + return nil, err + } + + defer jetStream.(*js).cleanupReplySub() + + purgePartial := func() { + // wait until all pubs are complete or up to default timeout before attempting purge + select { + case <-jetStream.PublishAsyncComplete(): + case <-time.After(obs.js.opts.wait): + } + obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj}) + } + + m, h := NewMsg(chunkSubj), sha256.New() + chunk, sent, total := make([]byte, meta.Opts.ChunkSize), 0, uint64(0) + + // set up the info object. The chunk upload sets the size and digest + info := &ObjectInfo{Bucket: obs.name, NUID: newnuid, ObjectMeta: *meta} + + for r != nil { + if ctx != nil { + select { + case <-ctx.Done(): + if ctx.Err() == context.Canceled { + err = ctx.Err() + } else { + err = ErrTimeout + } + default: + } + if err != nil { + purgePartial() + return nil, err + } + } + + // Actual read. + // TODO(dlc) - Deadline? + n, readErr := r.Read(chunk) + + // Handle all non EOF errors + if readErr != nil && readErr != io.EOF { + purgePartial() + return nil, readErr + } + + // Add chunk only if we received data + if n > 0 { + // Chunk processing. + m.Data = chunk[:n] + h.Write(m.Data) + + // Send msg itself. + if _, err := jetStream.PublishMsgAsync(m); err != nil { + purgePartial() + return nil, err + } + if err := getErr(); err != nil { + purgePartial() + return nil, err + } + // Update totals. + sent++ + total += uint64(n) + } + + // EOF Processing. + if readErr == io.EOF { + // Place meta info. + info.Size, info.Chunks = uint64(total), uint32(sent) + info.Digest = GetObjectDigestValue(h) + break + } + } + + // Prepare the meta message + metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(meta.Name)) + mm := NewMsg(metaSubj) + mm.Header.Set(MsgRollup, MsgRollupSubject) + mm.Data, err = json.Marshal(info) + if err != nil { + if r != nil { + purgePartial() + } + return nil, err + } + + // Publish the meta message. 
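+	// (Like the chunks, the meta message goes through the async publisher, so
+	// the single completion wait below covers both; the rollup header set
+	// above makes it supersede any previous meta message on this subject.)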
+ _, err = jetStream.PublishMsgAsync(mm) + if err != nil { + if r != nil { + purgePartial() + } + return nil, err + } + + // Wait for all to be processed. + select { + case <-jetStream.PublishAsyncComplete(): + if err := getErr(); err != nil { + if r != nil { + purgePartial() + } + return nil, err + } + case <-time.After(obs.js.opts.wait): + return nil, ErrTimeout + } + + info.ModTime = time.Now().UTC() // This time is not actually the correct time + + // Delete any original chunks. + if einfo != nil && !einfo.Deleted { + echunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, einfo.NUID) + obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: echunkSubj}) + } + + // TODO would it be okay to do this to return the info with the correct time? + // With the understanding that it is an extra call to the server. + // Otherwise the time the user gets back is the client time, not the server time. + // return obs.GetInfo(info.Name) + + return info, nil +} + +// GetObjectDigestValue calculates the base64 value of hashed data +func GetObjectDigestValue(data hash.Hash) string { + sha := data.Sum(nil) + return fmt.Sprintf(objDigestTmpl, base64.URLEncoding.EncodeToString(sha[:])) +} + +// DecodeObjectDigest decodes base64 hash +func DecodeObjectDigest(data string) ([]byte, error) { + digest := strings.SplitN(data, "=", 2) + if len(digest) != 2 { + return nil, ErrInvalidDigestFormat + } + return base64.URLEncoding.DecodeString(digest[1]) +} + +// ObjectResult impl. +type objResult struct { + sync.Mutex + info *ObjectInfo + r io.ReadCloser + err error + ctx context.Context + digest hash.Hash +} + +func (info *ObjectInfo) isLink() bool { + return info.ObjectMeta.Opts != nil && info.ObjectMeta.Opts.Link != nil +} + +type GetObjectOpt interface { + configureGetObject(opts *getObjectOpts) error +} +type getObjectOpts struct { + ctx context.Context + // Include deleted object in the result. + showDeleted bool +} + +type getObjectFn func(opts *getObjectOpts) error + +func (opt getObjectFn) configureGetObject(opts *getObjectOpts) error { + return opt(opts) +} + +// GetObjectShowDeleted makes Get() return object if it was marked as deleted. +func GetObjectShowDeleted() GetObjectOpt { + return getObjectFn(func(opts *getObjectOpts) error { + opts.showDeleted = true + return nil + }) +} + +// For nats.Context() support. +func (ctx ContextOpt) configureGetObject(opts *getObjectOpts) error { + opts.ctx = ctx + return nil +} + +// Get will pull the object from the underlying stream. +func (obs *obs) Get(name string, opts ...GetObjectOpt) (ObjectResult, error) { + var o getObjectOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureGetObject(&o); err != nil { + return nil, err + } + } + } + ctx := o.ctx + infoOpts := make([]GetObjectInfoOpt, 0) + if ctx != nil { + infoOpts = append(infoOpts, Context(ctx)) + } + if o.showDeleted { + infoOpts = append(infoOpts, GetObjectInfoShowDeleted()) + } + + // Grab meta info. + info, err := obs.GetInfo(name, infoOpts...) + if err != nil { + return nil, err + } + if info.NUID == _EMPTY_ { + return nil, ErrBadObjectMeta + } + + // Check for object links. If single objects we do a pass through. + if info.isLink() { + if info.ObjectMeta.Opts.Link.Name == _EMPTY_ { + return nil, ErrCantGetBucket + } + + // is the link in the same bucket? 
+		lbuck := info.ObjectMeta.Opts.Link.Bucket
+		if lbuck == obs.name {
+			return obs.Get(info.ObjectMeta.Opts.Link.Name)
+		}
+
+		// different bucket
+		lobs, err := obs.js.ObjectStore(lbuck)
+		if err != nil {
+			return nil, err
+		}
+		return lobs.Get(info.ObjectMeta.Opts.Link.Name)
+	}
+
+	result := &objResult{info: info, ctx: ctx}
+	if info.Size == 0 {
+		return result, nil
+	}
+
+	pr, pw := net.Pipe()
+	result.r = pr
+
+	gotErr := func(m *Msg, err error) {
+		pw.Close()
+		m.Sub.Unsubscribe()
+		result.setErr(err)
+	}
+
+	// For calculating sum256
+	result.digest = sha256.New()
+
+	processChunk := func(m *Msg) {
+		var err error
+		if ctx != nil {
+			select {
+			case <-ctx.Done():
+				if ctx.Err() == context.Canceled {
+					err = ctx.Err()
+				} else {
+					err = ErrTimeout
+				}
+			default:
+			}
+			if err != nil {
+				gotErr(m, err)
+				return
+			}
+		}
+
+		tokens, err := parser.GetMetadataFields(m.Reply)
+		if err != nil {
+			gotErr(m, err)
+			return
+		}
+
+		// Write to our pipe.
+		for b := m.Data; len(b) > 0; {
+			n, err := pw.Write(b)
+			if err != nil {
+				gotErr(m, err)
+				return
+			}
+			b = b[n:]
+		}
+		// Update sha256
+		result.digest.Write(m.Data)
+
+		// Check if we are done.
+		if tokens[parser.AckNumPendingTokenPos] == objNoPending {
+			pw.Close()
+			m.Sub.Unsubscribe()
+		}
+	}
+
+	chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID)
+	_, err = obs.js.Subscribe(chunkSubj, processChunk, OrderedConsumer())
+	if err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Delete will delete the object.
+func (obs *obs) Delete(name string) error {
+	// Grab meta info.
+	info, err := obs.GetInfo(name, GetObjectInfoShowDeleted())
+	if err != nil {
+		return err
+	}
+	if info.NUID == _EMPTY_ {
+		return ErrBadObjectMeta
+	}
+
+	// Place a rollup delete marker and publish the info
+	info.Deleted = true
+	info.Size, info.Chunks, info.Digest = 0, 0, _EMPTY_
+
+	if err = publishMeta(info, obs.js); err != nil {
+		return err
+	}
+
+	// Purge chunks for the object.
+	chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID)
+	return obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj})
+}
+
+func publishMeta(info *ObjectInfo, js JetStreamContext) error {
+	// marshal the object into json, don't store an actual time
+	info.ModTime = time.Time{}
+	data, err := json.Marshal(info)
+	if err != nil {
+		return err
+	}
+
+	// Prepare and publish the message.
+	mm := NewMsg(fmt.Sprintf(objMetaPreTmpl, info.Bucket, encodeName(info.ObjectMeta.Name)))
+	mm.Header.Set(MsgRollup, MsgRollupSubject)
+	mm.Data = data
+	if _, err := js.PublishMsg(mm); err != nil {
+		return err
+	}
+
+	// set the ModTime in case it's returned to the user, even though it's not the correct time.
+	info.ModTime = time.Now().UTC()
+	return nil
+}
+
+// AddLink will add a link to another object if it's not deleted and not another link.
+// name is the name of this link object.
+// obj is what is being linked to.
+func (obs *obs) AddLink(name string, obj *ObjectInfo) (*ObjectInfo, error) {
+	if name == "" {
+		return nil, ErrNameRequired
+	}
+
+	// TODO Handle stale info
+
+	if obj == nil || obj.Name == "" {
+		return nil, ErrObjectRequired
+	}
+	if obj.Deleted {
+		return nil, ErrNoLinkToDeleted
+	}
+	if obj.isLink() {
+		return nil, ErrNoLinkToLink
+	}
+
+	// If object with link's name is found, error.
+	// If link with link's name is found, that's okay to overwrite.
+	// If there was an error that was not ErrObjectNotFound, error.
+	einfo, err := obs.GetInfo(name, GetObjectInfoShowDeleted())
+	if einfo != nil {
+		if !einfo.isLink() {
+			return nil, ErrObjectAlreadyExists
+		}
+	} else if err != ErrObjectNotFound {
+		return nil, err
+	}
+
+	// create the meta for the link
+	meta := &ObjectMeta{
+		Name: name,
+		Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: obj.Bucket, Name: obj.Name}},
+	}
+	info := &ObjectInfo{Bucket: obs.name, NUID: nuid.Next(), ModTime: time.Now().UTC(), ObjectMeta: *meta}
+
+	// put the link object
+	if err = publishMeta(info, obs.js); err != nil {
+		return nil, err
+	}
+
+	return info, nil
+}
+
+// AddBucketLink will add a link to another object store.
+func (ob *obs) AddBucketLink(name string, bucket ObjectStore) (*ObjectInfo, error) {
+	if name == "" {
+		return nil, ErrNameRequired
+	}
+	if bucket == nil {
+		return nil, ErrBucketRequired
+	}
+	bos, ok := bucket.(*obs)
+	if !ok {
+		return nil, ErrBucketMalformed
+	}
+
+	// If object with link's name is found, error.
+	// If link with link's name is found, that's okay to overwrite.
+	// If there was an error that was not ErrObjectNotFound, error.
+	einfo, err := ob.GetInfo(name, GetObjectInfoShowDeleted())
+	if einfo != nil {
+		if !einfo.isLink() {
+			return nil, ErrObjectAlreadyExists
+		}
+	} else if err != ErrObjectNotFound {
+		return nil, err
+	}
+
+	// create the meta for the link
+	meta := &ObjectMeta{
+		Name: name,
+		Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: bos.name}},
+	}
+	info := &ObjectInfo{Bucket: ob.name, NUID: nuid.Next(), ObjectMeta: *meta}
+
+	// put the link object
+	err = publishMeta(info, ob.js)
+	if err != nil {
+		return nil, err
+	}
+
+	return info, nil
+}
+
+// PutBytes is a convenience function to put a byte slice into this object store.
+func (obs *obs) PutBytes(name string, data []byte, opts ...ObjectOpt) (*ObjectInfo, error) {
+	return obs.Put(&ObjectMeta{Name: name}, bytes.NewReader(data), opts...)
+}
+
+// GetBytes is a convenience function to pull an object from this object store and return it as a byte slice.
+func (obs *obs) GetBytes(name string, opts ...GetObjectOpt) ([]byte, error) {
+	result, err := obs.Get(name, opts...)
+	if err != nil {
+		return nil, err
+	}
+	defer result.Close()
+
+	var b bytes.Buffer
+	if _, err := b.ReadFrom(result); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+// PutString is a convenience function to put a string into this object store.
+func (obs *obs) PutString(name string, data string, opts ...ObjectOpt) (*ObjectInfo, error) {
+	return obs.Put(&ObjectMeta{Name: name}, strings.NewReader(data), opts...)
+}
+
+// GetString is a convenience function to pull an object from this object store and return it as a string.
+func (obs *obs) GetString(name string, opts ...GetObjectOpt) (string, error) {
+	result, err := obs.Get(name, opts...)
+	if err != nil {
+		return _EMPTY_, err
+	}
+	defer result.Close()
+
+	var b bytes.Buffer
+	if _, err := b.ReadFrom(result); err != nil {
+		return _EMPTY_, err
+	}
+	return b.String(), nil
+}
+
+// PutFile is a convenience function to put a file into an object store.
+func (obs *obs) PutFile(file string, opts ...ObjectOpt) (*ObjectInfo, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return obs.Put(&ObjectMeta{Name: file}, f, opts...)
+}
+
+// GetFile is a convenience function to pull an object and place it in a file.
+func (obs *obs) GetFile(name, file string, opts ...GetObjectOpt) error {
+	// Expect file to be new.
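+	// (Note: os.O_CREATE without os.O_TRUNC does not truncate an existing
+	// file, so a pre-existing longer file would keep its trailing bytes
+	// beyond the copied object; the call below assumes a fresh file.)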
+ f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + return err + } + defer f.Close() + + result, err := obs.Get(name, opts...) + if err != nil { + os.Remove(f.Name()) + return err + } + defer result.Close() + + // Stream copy to the file. + _, err = io.Copy(f, result) + return err +} + +type GetObjectInfoOpt interface { + configureGetInfo(opts *getObjectInfoOpts) error +} +type getObjectInfoOpts struct { + ctx context.Context + // Include deleted object in the result. + showDeleted bool +} + +type getObjectInfoFn func(opts *getObjectInfoOpts) error + +func (opt getObjectInfoFn) configureGetInfo(opts *getObjectInfoOpts) error { + return opt(opts) +} + +// GetObjectInfoShowDeleted makes GetInfo() return object if it was marked as deleted. +func GetObjectInfoShowDeleted() GetObjectInfoOpt { + return getObjectInfoFn(func(opts *getObjectInfoOpts) error { + opts.showDeleted = true + return nil + }) +} + +// For nats.Context() support. +func (ctx ContextOpt) configureGetInfo(opts *getObjectInfoOpts) error { + opts.ctx = ctx + return nil +} + +// GetInfo will retrieve the current information for the object. +func (obs *obs) GetInfo(name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) { + // Grab last meta value we have. + if name == "" { + return nil, ErrNameRequired + } + var o getObjectInfoOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureGetInfo(&o); err != nil { + return nil, err + } + } + } + + metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) // used as data in a JS API call + stream := fmt.Sprintf(objNameTmpl, obs.name) + + m, err := obs.js.GetLastMsg(stream, metaSubj) + if err != nil { + if err == ErrMsgNotFound { + err = ErrObjectNotFound + } + return nil, err + } + var info ObjectInfo + if err := json.Unmarshal(m.Data, &info); err != nil { + return nil, ErrBadObjectMeta + } + if !o.showDeleted && info.Deleted { + return nil, ErrObjectNotFound + } + info.ModTime = m.Time + return &info, nil +} + +// UpdateMeta will update the meta for the object. +func (obs *obs) UpdateMeta(name string, meta *ObjectMeta) error { + if meta == nil { + return ErrBadObjectMeta + } + + // Grab the current meta. + info, err := obs.GetInfo(name) + if err != nil { + if errors.Is(err, ErrObjectNotFound) { + return ErrUpdateMetaDeleted + } + return err + } + + // If the new name is different from the old, and it exists, error + // If there was an error that was not ErrObjectNotFound, error. + if name != meta.Name { + existingInfo, err := obs.GetInfo(meta.Name, GetObjectInfoShowDeleted()) + if err != nil && !errors.Is(err, ErrObjectNotFound) { + return err + } + if err == nil && !existingInfo.Deleted { + return ErrObjectAlreadyExists + } + } + + // Update Meta prevents update of ObjectMetaOptions (Link, ChunkSize) + // These should only be updated internally when appropriate. + info.Name = meta.Name + info.Description = meta.Description + info.Headers = meta.Headers + info.Metadata = meta.Metadata + + // Prepare the meta message + if err = publishMeta(info, obs.js); err != nil { + return err + } + + // did the name of this object change? We just stored the meta under the new name + // so delete the meta from the old name via purge stream for subject + if name != meta.Name { + metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) + return obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: metaSubj}) + } + + return nil +} + +// Seal will seal the object store, no further modifications will be allowed. 
+func (obs *obs) Seal() error {
+	stream := fmt.Sprintf(objNameTmpl, obs.name)
+	si, err := obs.js.StreamInfo(stream)
+	if err != nil {
+		return err
+	}
+	// Seal the stream from being able to take on more messages.
+	cfg := si.Config
+	cfg.Sealed = true
+	_, err = obs.js.UpdateStream(&cfg)
+	return err
+}
+
+// Implementation for Watch
+type objWatcher struct {
+	updates chan *ObjectInfo
+	sub     *Subscription
+}
+
+// Updates returns the interior channel.
+func (w *objWatcher) Updates() <-chan *ObjectInfo {
+	if w == nil {
+		return nil
+	}
+	return w.updates
+}
+
+// Stop will unsubscribe from the watcher.
+func (w *objWatcher) Stop() error {
+	if w == nil {
+		return nil
+	}
+	return w.sub.Unsubscribe()
+}
+
+// Watch for changes in the underlying store and receive meta information updates.
+func (obs *obs) Watch(opts ...WatchOpt) (ObjectWatcher, error) {
+	var o watchOpts
+	for _, opt := range opts {
+		if opt != nil {
+			if err := opt.configureWatcher(&o); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	var initDoneMarker bool
+
+	w := &objWatcher{updates: make(chan *ObjectInfo, 32)}
+
+	update := func(m *Msg) {
+		var info ObjectInfo
+		if err := json.Unmarshal(m.Data, &info); err != nil {
+			return // TODO(dlc) - Communicate this upwards?
+		}
+		meta, err := m.Metadata()
+		if err != nil {
+			return
+		}
+
+		if !o.ignoreDeletes || !info.Deleted {
+			info.ModTime = meta.Timestamp
+			w.updates <- &info
+		}
+
+		// if UpdatesOnly is set, do not send nil to the channel
+		// as it would always be triggered after initializing the watcher
+		if !initDoneMarker && meta.NumPending == 0 {
+			initDoneMarker = true
+			w.updates <- nil
+		}
+	}
+
+	allMeta := fmt.Sprintf(objAllMetaPreTmpl, obs.name)
+	_, err := obs.js.GetLastMsg(obs.stream, allMeta)
+	// if there are no messages on the stream and we are not watching
+	// updates only, send nil to the channel to indicate that the initial
+	// watch is done
+	if !o.updatesOnly {
+		if errors.Is(err, ErrMsgNotFound) {
+			initDoneMarker = true
+			w.updates <- nil
+		}
+	} else {
+		// if UpdatesOnly was used, mark initialization as complete
+		initDoneMarker = true
+	}
+
+	// Use an ordered consumer to deliver results.
+	subOpts := []SubOpt{OrderedConsumer()}
+	if !o.includeHistory {
+		subOpts = append(subOpts, DeliverLastPerSubject())
+	}
+	if o.updatesOnly {
+		subOpts = append(subOpts, DeliverNew())
+	}
+	sub, err := obs.js.Subscribe(allMeta, update, subOpts...)
+	if err != nil {
+		return nil, err
+	}
+	w.sub = sub
+	return w, nil
+}
+
+type ListObjectsOpt interface {
+	configureListObjects(opts *listObjectOpts) error
+}
+type listObjectOpts struct {
+	ctx context.Context
+	// Include deleted objects in the result channel.
+	showDeleted bool
+}
+
+type listObjectsFn func(opts *listObjectOpts) error
+
+func (opt listObjectsFn) configureListObjects(opts *listObjectOpts) error {
+	return opt(opts)
+}
+
+// ListObjectsShowDeleted makes ListObjects() return deleted objects.
+func ListObjectsShowDeleted() ListObjectsOpt {
+	return listObjectsFn(func(opts *listObjectOpts) error {
+		opts.showDeleted = true
+		return nil
+	})
+}
+
+// For nats.Context() support.
+func (ctx ContextOpt) configureListObjects(opts *listObjectOpts) error {
+	opts.ctx = ctx
+	return nil
+}
+
+// List will list all the objects in this store.
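+//
+// Illustrative call that also surfaces deletion markers ("store" is an
+// assumption for the sketch):
+//
+//	infos, err := store.List(nats.ListObjectsShowDeleted())
+//	if err != nil && err != nats.ErrNoObjectsFound {
+//		log.Fatal(err)
+//	}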
+func (obs *obs) List(opts ...ListObjectsOpt) ([]*ObjectInfo, error) { + var o listObjectOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureListObjects(&o); err != nil { + return nil, err + } + } + } + watchOpts := make([]WatchOpt, 0) + if !o.showDeleted { + watchOpts = append(watchOpts, IgnoreDeletes()) + } + watcher, err := obs.Watch(watchOpts...) + if err != nil { + return nil, err + } + defer watcher.Stop() + if o.ctx == nil { + o.ctx = context.Background() + } + + var objs []*ObjectInfo + updates := watcher.Updates() +Updates: + for { + select { + case entry := <-updates: + if entry == nil { + break Updates + } + objs = append(objs, entry) + case <-o.ctx.Done(): + return nil, o.ctx.Err() + } + } + if len(objs) == 0 { + return nil, ErrNoObjectsFound + } + return objs, nil +} + +// ObjectBucketStatus represents status of a Bucket, implements ObjectStoreStatus +type ObjectBucketStatus struct { + nfo *StreamInfo + bucket string +} + +// Bucket is the name of the bucket +func (s *ObjectBucketStatus) Bucket() string { return s.bucket } + +// Description is the description supplied when creating the bucket +func (s *ObjectBucketStatus) Description() string { return s.nfo.Config.Description } + +// TTL indicates how long objects are kept in the bucket +func (s *ObjectBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } + +// Storage indicates the underlying JetStream storage technology used to store data +func (s *ObjectBucketStatus) Storage() StorageType { return s.nfo.Config.Storage } + +// Replicas indicates how many storage replicas are kept for the data in the bucket +func (s *ObjectBucketStatus) Replicas() int { return s.nfo.Config.Replicas } + +// Sealed indicates the stream is sealed and cannot be modified in any way +func (s *ObjectBucketStatus) Sealed() bool { return s.nfo.Config.Sealed } + +// Size is the combined size of all data in the bucket including metadata, in bytes +func (s *ObjectBucketStatus) Size() uint64 { return s.nfo.State.Bytes } + +// BackingStore indicates what technology is used for storage of the bucket +func (s *ObjectBucketStatus) BackingStore() string { return "JetStream" } + +// Metadata is the metadata supplied when creating the bucket +func (s *ObjectBucketStatus) Metadata() map[string]string { return s.nfo.Config.Metadata } + +// StreamInfo is the stream info retrieved to create the status +func (s *ObjectBucketStatus) StreamInfo() *StreamInfo { return s.nfo } + +// Status retrieves run-time status about a bucket +func (obs *obs) Status() (ObjectStoreStatus, error) { + nfo, err := obs.js.StreamInfo(obs.stream) + if err != nil { + return nil, err + } + + status := &ObjectBucketStatus{ + nfo: nfo, + bucket: obs.name, + } + + return status, nil +} + +// Read impl. +func (o *objResult) Read(p []byte) (n int, err error) { + o.Lock() + defer o.Unlock() + if ctx := o.ctx; ctx != nil { + select { + case <-ctx.Done(): + if ctx.Err() == context.Canceled { + o.err = ctx.Err() + } else { + o.err = ErrTimeout + } + default: + } + } + if o.err != nil { + return 0, o.err + } + if o.r == nil { + return 0, io.EOF + } + + r := o.r.(net.Conn) + r.SetReadDeadline(time.Now().Add(2 * time.Second)) + n, err = r.Read(p) + if err, ok := err.(net.Error); ok && err.Timeout() { + if ctx := o.ctx; ctx != nil { + select { + case <-ctx.Done(): + if ctx.Err() == context.Canceled { + return 0, ctx.Err() + } else { + return 0, ErrTimeout + } + default: + err = nil + } + } + } + if err == io.EOF { + // Make sure the digest matches. 
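+		// (Digest strings have the form "SHA-256=<base64url>";
+		// DecodeObjectDigest strips the prefix so the raw hashes can be
+		// compared below.)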
+ sha := o.digest.Sum(nil) + rsha, decodeErr := DecodeObjectDigest(o.info.Digest) + if decodeErr != nil { + o.err = decodeErr + return 0, o.err + } + if !bytes.Equal(sha[:], rsha) { + o.err = ErrDigestMismatch + return 0, o.err + } + } + return n, err +} + +// Close impl. +func (o *objResult) Close() error { + o.Lock() + defer o.Unlock() + if o.r == nil { + return nil + } + return o.r.Close() +} + +func (o *objResult) setErr(err error) { + o.Lock() + defer o.Unlock() + o.err = err +} + +func (o *objResult) Info() (*ObjectInfo, error) { + o.Lock() + defer o.Unlock() + return o.info, o.err +} + +func (o *objResult) Error() error { + o.Lock() + defer o.Unlock() + return o.err +} + +// ObjectStoreNames is used to retrieve a list of bucket names +func (js *js) ObjectStoreNames(opts ...ObjectOpt) <-chan string { + var o objOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureObject(&o); err != nil { + return nil + } + } + } + ch := make(chan string) + var cancel context.CancelFunc + if o.ctx == nil { + o.ctx, cancel = context.WithTimeout(context.Background(), defaultRequestWait) + } + l := &streamLister{js: js} + l.js.opts.streamListSubject = fmt.Sprintf(objAllChunksPreTmpl, "*") + l.js.opts.ctx = o.ctx + go func() { + if cancel != nil { + defer cancel() + } + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + if !strings.HasPrefix(info.Config.Name, "OBJ_") { + continue + } + select { + case ch <- info.Config.Name: + case <-o.ctx.Done(): + return + } + } + } + }() + + return ch +} + +// ObjectStores is used to retrieve a list of bucket statuses +func (js *js) ObjectStores(opts ...ObjectOpt) <-chan ObjectStoreStatus { + var o objOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureObject(&o); err != nil { + return nil + } + } + } + ch := make(chan ObjectStoreStatus) + var cancel context.CancelFunc + if o.ctx == nil { + o.ctx, cancel = context.WithTimeout(context.Background(), defaultRequestWait) + } + l := &streamLister{js: js} + l.js.opts.streamListSubject = fmt.Sprintf(objAllChunksPreTmpl, "*") + l.js.opts.ctx = o.ctx + go func() { + if cancel != nil { + defer cancel() + } + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + if !strings.HasPrefix(info.Config.Name, "OBJ_") { + continue + } + select { + case ch <- &ObjectBucketStatus{ + nfo: info, + bucket: strings.TrimPrefix(info.Config.Name, "OBJ_"), + }: + case <-o.ctx.Done(): + return + } + } + } + }() + + return ch +} diff --git a/vendor/github.com/nats-io/nats.go/parser.go b/vendor/github.com/nats-io/nats.go/parser.go new file mode 100644 index 00000000..70204e60 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/parser.go @@ -0,0 +1,554 @@ +// Copyright 2012-2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nats + +import ( + "fmt" +) + +type msgArg struct { + subject []byte + reply []byte + sid int64 + hdr int + size int +} + +const MAX_CONTROL_LINE_SIZE = 4096 + +type parseState struct { + state int + as int + drop int + hdr int + ma msgArg + argBuf []byte + msgBuf []byte + msgCopied bool + scratch [MAX_CONTROL_LINE_SIZE]byte +} + +const ( + OP_START = iota + OP_PLUS + OP_PLUS_O + OP_PLUS_OK + OP_MINUS + OP_MINUS_E + OP_MINUS_ER + OP_MINUS_ERR + OP_MINUS_ERR_SPC + MINUS_ERR_ARG + OP_M + OP_MS + OP_MSG + OP_MSG_SPC + MSG_ARG + MSG_PAYLOAD + MSG_END + OP_H + OP_P + OP_PI + OP_PIN + OP_PING + OP_PO + OP_PON + OP_PONG + OP_I + OP_IN + OP_INF + OP_INFO + OP_INFO_SPC + INFO_ARG +) + +// parse is the fast protocol parser engine. +func (nc *Conn) parse(buf []byte) error { + var i int + var b byte + + // Move to loop instead of range syntax to allow jumping of i + for i = 0; i < len(buf); i++ { + b = buf[i] + + switch nc.ps.state { + case OP_START: + switch b { + case 'M', 'm': + nc.ps.state = OP_M + nc.ps.hdr = -1 + nc.ps.ma.hdr = -1 + case 'H', 'h': + nc.ps.state = OP_H + nc.ps.hdr = 0 + nc.ps.ma.hdr = 0 + case 'P', 'p': + nc.ps.state = OP_P + case '+': + nc.ps.state = OP_PLUS + case '-': + nc.ps.state = OP_MINUS + case 'I', 'i': + nc.ps.state = OP_I + default: + goto parseErr + } + case OP_H: + switch b { + case 'M', 'm': + nc.ps.state = OP_M + default: + goto parseErr + } + case OP_M: + switch b { + case 'S', 's': + nc.ps.state = OP_MS + default: + goto parseErr + } + case OP_MS: + switch b { + case 'G', 'g': + nc.ps.state = OP_MSG + default: + goto parseErr + } + case OP_MSG: + switch b { + case ' ', '\t': + nc.ps.state = OP_MSG_SPC + default: + goto parseErr + } + case OP_MSG_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = MSG_ARG + nc.ps.as = i + } + case MSG_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + if err := nc.processMsgArgs(arg); err != nil { + return err + } + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, MSG_PAYLOAD + + // jump ahead with the index. If this overruns + // what is left we fall out and process a split buffer. + i = nc.ps.as + nc.ps.ma.size - 1 + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + case MSG_PAYLOAD: + if nc.ps.msgBuf != nil { + if len(nc.ps.msgBuf) >= nc.ps.ma.size { + nc.processMsg(nc.ps.msgBuf) + nc.ps.argBuf, nc.ps.msgBuf, nc.ps.msgCopied, nc.ps.state = nil, nil, false, MSG_END + } else { + // copy as much as we can to the buffer and skip ahead. + toCopy := nc.ps.ma.size - len(nc.ps.msgBuf) + avail := len(buf) - i + + if avail < toCopy { + toCopy = avail + } + + if toCopy > 0 { + start := len(nc.ps.msgBuf) + // This is needed for copy to work. 
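+					// (copy only writes up to len(dst), so the slice length
+					// must be grown before copying into the tail.)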
+ nc.ps.msgBuf = nc.ps.msgBuf[:start+toCopy] + copy(nc.ps.msgBuf[start:], buf[i:i+toCopy]) + // Update our index + i = (i + toCopy) - 1 + } else { + nc.ps.msgBuf = append(nc.ps.msgBuf, b) + } + } + } else if i-nc.ps.as >= nc.ps.ma.size { + nc.processMsg(buf[nc.ps.as:i]) + nc.ps.argBuf, nc.ps.msgBuf, nc.ps.msgCopied, nc.ps.state = nil, nil, false, MSG_END + } + case MSG_END: + switch b { + case '\n': + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + continue + } + case OP_PLUS: + switch b { + case 'O', 'o': + nc.ps.state = OP_PLUS_O + default: + goto parseErr + } + case OP_PLUS_O: + switch b { + case 'K', 'k': + nc.ps.state = OP_PLUS_OK + default: + goto parseErr + } + case OP_PLUS_OK: + switch b { + case '\n': + nc.processOK() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_MINUS: + switch b { + case 'E', 'e': + nc.ps.state = OP_MINUS_E + default: + goto parseErr + } + case OP_MINUS_E: + switch b { + case 'R', 'r': + nc.ps.state = OP_MINUS_ER + default: + goto parseErr + } + case OP_MINUS_ER: + switch b { + case 'R', 'r': + nc.ps.state = OP_MINUS_ERR + default: + goto parseErr + } + case OP_MINUS_ERR: + switch b { + case ' ', '\t': + nc.ps.state = OP_MINUS_ERR_SPC + default: + goto parseErr + } + case OP_MINUS_ERR_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = MINUS_ERR_ARG + nc.ps.as = i + } + case MINUS_ERR_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + nc.ps.argBuf = nil + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + nc.processErr(string(arg)) + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + case OP_P: + switch b { + case 'I', 'i': + nc.ps.state = OP_PI + case 'O', 'o': + nc.ps.state = OP_PO + default: + goto parseErr + } + case OP_PO: + switch b { + case 'N', 'n': + nc.ps.state = OP_PON + default: + goto parseErr + } + case OP_PON: + switch b { + case 'G', 'g': + nc.ps.state = OP_PONG + default: + goto parseErr + } + case OP_PONG: + switch b { + case '\n': + nc.processPong() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_PI: + switch b { + case 'N', 'n': + nc.ps.state = OP_PIN + default: + goto parseErr + } + case OP_PIN: + switch b { + case 'G', 'g': + nc.ps.state = OP_PING + default: + goto parseErr + } + case OP_PING: + switch b { + case '\n': + nc.processPing() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_I: + switch b { + case 'N', 'n': + nc.ps.state = OP_IN + default: + goto parseErr + } + case OP_IN: + switch b { + case 'F', 'f': + nc.ps.state = OP_INF + default: + goto parseErr + } + case OP_INF: + switch b { + case 'O', 'o': + nc.ps.state = OP_INFO + default: + goto parseErr + } + case OP_INFO: + switch b { + case ' ', '\t': + nc.ps.state = OP_INFO_SPC + default: + goto parseErr + } + case OP_INFO_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = INFO_ARG + nc.ps.as = i + } + case INFO_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + nc.ps.argBuf = nil + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + nc.processAsyncInfo(arg) + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + default: + goto parseErr + } + } + // Check for split buffer scenarios + if (nc.ps.state == MSG_ARG || nc.ps.state == MINUS_ERR_ARG || nc.ps.state == INFO_ARG) && 
nc.ps.argBuf == nil { + nc.ps.argBuf = nc.ps.scratch[:0] + nc.ps.argBuf = append(nc.ps.argBuf, buf[nc.ps.as:i-nc.ps.drop]...) + // FIXME, check max len + } + // Check for split msg + if nc.ps.state == MSG_PAYLOAD && nc.ps.msgBuf == nil { + // We need to clone the msgArg if it is still referencing the + // read buffer and we are not able to process the msg. + if nc.ps.argBuf == nil { + nc.cloneMsgArg() + } + + // If we will overflow the scratch buffer, just create a + // new buffer to hold the split message. + if nc.ps.ma.size > cap(nc.ps.scratch)-len(nc.ps.argBuf) { + lrem := len(buf[nc.ps.as:]) + + nc.ps.msgBuf = make([]byte, lrem, nc.ps.ma.size) + copy(nc.ps.msgBuf, buf[nc.ps.as:]) + nc.ps.msgCopied = true + } else { + nc.ps.msgBuf = nc.ps.scratch[len(nc.ps.argBuf):len(nc.ps.argBuf)] + nc.ps.msgBuf = append(nc.ps.msgBuf, (buf[nc.ps.as:])...) + } + } + + return nil + +parseErr: + return fmt.Errorf("nats: Parse Error [%d]: '%s'", nc.ps.state, buf[i:]) +} + +// cloneMsgArg is used when the split buffer scenario has the pubArg in the existing read buffer, but +// we need to hold onto it into the next read. +func (nc *Conn) cloneMsgArg() { + nc.ps.argBuf = nc.ps.scratch[:0] + nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.subject...) + nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.reply...) + nc.ps.ma.subject = nc.ps.argBuf[:len(nc.ps.ma.subject)] + if nc.ps.ma.reply != nil { + nc.ps.ma.reply = nc.ps.argBuf[len(nc.ps.ma.subject):] + } +} + +const argsLenMax = 4 + +func (nc *Conn) processMsgArgs(arg []byte) error { + // Use separate function for header based messages. + if nc.ps.hdr >= 0 { + return nc.processHeaderMsgArgs(arg) + } + + // Unroll splitArgs to avoid runtime/heap issues + a := [argsLenMax][]byte{} + args := a[:0] + start := -1 + for i, b := range arg { + switch b { + case ' ', '\t', '\r', '\n': + if start >= 0 { + args = append(args, arg[start:i]) + start = -1 + } + default: + if start < 0 { + start = i + } + } + } + if start >= 0 { + args = append(args, arg[start:]) + } + + switch len(args) { + case 3: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = nil + nc.ps.ma.size = int(parseInt64(args[2])) + case 4: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = args[2] + nc.ps.ma.size = int(parseInt64(args[3])) + default: + return fmt.Errorf("nats: processMsgArgs Parse Error: '%s'", arg) + } + if nc.ps.ma.sid < 0 { + return fmt.Errorf("nats: processMsgArgs Bad or Missing Sid: '%s'", arg) + } + if nc.ps.ma.size < 0 { + return fmt.Errorf("nats: processMsgArgs Bad or Missing Size: '%s'", arg) + } + return nil +} + +// processHeaderMsgArgs is for a header based message. 
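+// The two accepted argument forms on the wire are:
+//
+//	HMSG <subject> <sid> <hdr_len> <total_len>
+//	HMSG <subject> <sid> <reply> <hdr_len> <total_len>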
+func (nc *Conn) processHeaderMsgArgs(arg []byte) error { + // Unroll splitArgs to avoid runtime/heap issues + a := [argsLenMax][]byte{} + args := a[:0] + start := -1 + for i, b := range arg { + switch b { + case ' ', '\t', '\r', '\n': + if start >= 0 { + args = append(args, arg[start:i]) + start = -1 + } + default: + if start < 0 { + start = i + } + } + } + if start >= 0 { + args = append(args, arg[start:]) + } + + switch len(args) { + case 4: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = nil + nc.ps.ma.hdr = int(parseInt64(args[2])) + nc.ps.ma.size = int(parseInt64(args[3])) + case 5: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = args[2] + nc.ps.ma.hdr = int(parseInt64(args[3])) + nc.ps.ma.size = int(parseInt64(args[4])) + default: + return fmt.Errorf("nats: processHeaderMsgArgs Parse Error: '%s'", arg) + } + if nc.ps.ma.sid < 0 { + return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Sid: '%s'", arg) + } + if nc.ps.ma.hdr < 0 || nc.ps.ma.hdr > nc.ps.ma.size { + return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Header Size: '%s'", arg) + } + if nc.ps.ma.size < 0 { + return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Size: '%s'", arg) + } + return nil +} + +// ASCII numbers 0-9 +const ( + ascii_0 = 48 + ascii_9 = 57 +) + +// parseInt64 expects decimal positive numbers. We +// return -1 to signal error +func parseInt64(d []byte) (n int64) { + if len(d) == 0 { + return -1 + } + for _, dec := range d { + if dec < ascii_0 || dec > ascii_9 { + return -1 + } + n = n*10 + (int64(dec) - ascii_0) + } + return n +} diff --git a/vendor/github.com/nats-io/nats.go/rand.go b/vendor/github.com/nats-io/nats.go/rand.go new file mode 100644 index 00000000..0cdee0ac --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/rand.go @@ -0,0 +1,29 @@ +// Copyright 2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.20 +// +build !go1.20 + +// A Go client for the NATS messaging system (https://nats.io). +package nats + +import ( + "math/rand" + "time" +) + +func init() { + // This is not needed since Go 1.20 because now rand.Seed always happens + // by default (uses runtime.fastrand64 instead as source). + rand.Seed(time.Now().UnixNano()) +} diff --git a/vendor/github.com/nats-io/nats.go/testing_internal.go b/vendor/github.com/nats-io/nats.go/testing_internal.go new file mode 100644 index 00000000..18397026 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/testing_internal.go @@ -0,0 +1,59 @@ +// Copyright 2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build internal_testing +// +build internal_testing + +// Functions in this file are only available when building nats.go with the +// internal_testing build tag. They are used by the nats.go test suite. +package nats + +// AddMsgFilter adds a message filter for the given subject +// to the connection. The filter will be called for each +// message received on the subject. If the filter returns +// nil, the message will be dropped. +func (nc *Conn) AddMsgFilter(subject string, filter msgFilter) { + nc.subsMu.Lock() + defer nc.subsMu.Unlock() + + if nc.filters == nil { + nc.filters = make(map[string]msgFilter) + } + nc.filters[subject] = filter +} + +// RemoveMsgFilter removes a message filter for the given subject. +func (nc *Conn) RemoveMsgFilter(subject string) { + nc.subsMu.Lock() + defer nc.subsMu.Unlock() + + if nc.filters != nil { + delete(nc.filters, subject) + if len(nc.filters) == 0 { + nc.filters = nil + } + } +} + +// IsJSControlMessage returns true if the message is a JetStream control message. +func IsJSControlMessage(msg *Msg) (bool, int) { + return isJSControlMessage(msg) +} + +// CloseTCPConn closes the underlying TCP connection. +// It can be used to simulate a disconnect. +func (nc *Conn) CloseTCPConn() { + nc.mu.Lock() + defer nc.mu.Unlock() + nc.conn.Close() +} diff --git a/vendor/github.com/nats-io/nats.go/timer.go b/vendor/github.com/nats-io/nats.go/timer.go new file mode 100644 index 00000000..4fb02ecb --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/timer.go @@ -0,0 +1,56 @@ +// Copyright 2017-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "sync" + "time" +) + +// global pool of *time.Timer's. can be used by multiple goroutines concurrently. +var globalTimerPool timerPool + +// timerPool provides GC-able pooling of *time.Timer's. +// can be used by multiple goroutines concurrently. +type timerPool struct { + p sync.Pool +} + +// Get returns a timer that completes after the given duration. +func (tp *timerPool) Get(d time.Duration) *time.Timer { + if t, _ := tp.p.Get().(*time.Timer); t != nil { + t.Reset(d) + return t + } + + return time.NewTimer(d) +} + +// Put pools the given timer. +// +// There is no need to call t.Stop() before calling Put. +// +// Put will try to stop the timer before pooling. If the +// given timer already expired, Put will read the unreceived +// value if there is one. 
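+//
+// Illustrative pairing with Get ("someDoneCh" is an assumed channel):
+//
+//	t := globalTimerPool.Get(time.Second)
+//	defer globalTimerPool.Put(t)
+//	select {
+//	case <-t.C:
+//		// timed out
+//	case <-someDoneCh:
+//	}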
+func (tp *timerPool) Put(t *time.Timer) { + if !t.Stop() { + select { + case <-t.C: + default: + } + } + + tp.p.Put(t) +} diff --git a/vendor/github.com/nats-io/nats.go/util/tls.go b/vendor/github.com/nats-io/nats.go/util/tls.go new file mode 100644 index 00000000..af9f51f0 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/util/tls.go @@ -0,0 +1,28 @@ +// Copyright 2017-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.8 +// +build go1.8 + +package util + +import "crypto/tls" + +// CloneTLSConfig returns a copy of c. +func CloneTLSConfig(c *tls.Config) *tls.Config { + if c == nil { + return &tls.Config{} + } + + return c.Clone() +} diff --git a/vendor/github.com/nats-io/nats.go/util/tls_go17.go b/vendor/github.com/nats-io/nats.go/util/tls_go17.go new file mode 100644 index 00000000..44d46b42 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/util/tls_go17.go @@ -0,0 +1,50 @@ +// Copyright 2016-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.7 && !go1.8 +// +build go1.7,!go1.8 + +package util + +import ( + "crypto/tls" +) + +// CloneTLSConfig returns a copy of c. Only the exported fields are copied. +// This is temporary, until this is provided by the language. +// https://go-review.googlesource.com/#/c/28075/ +func CloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/github.com/nats-io/nats.go/ws.go b/vendor/github.com/nats-io/nats.go/ws.go new file mode 100644 index 00000000..2c2d421a --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/ws.go @@ -0,0 +1,780 @@ +// Copyright 2021-2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "io" + mrand "math/rand" + "net/http" + "net/url" + "strings" + "time" + "unicode/utf8" + + "github.com/klauspost/compress/flate" +) + +type wsOpCode int + +const ( + // From https://tools.ietf.org/html/rfc6455#section-5.2 + wsTextMessage = wsOpCode(1) + wsBinaryMessage = wsOpCode(2) + wsCloseMessage = wsOpCode(8) + wsPingMessage = wsOpCode(9) + wsPongMessage = wsOpCode(10) + + wsFinalBit = 1 << 7 + wsRsv1Bit = 1 << 6 // Used for compression, from https://tools.ietf.org/html/rfc7692#section-6 + wsRsv2Bit = 1 << 5 + wsRsv3Bit = 1 << 4 + + wsMaskBit = 1 << 7 + + wsContinuationFrame = 0 + wsMaxFrameHeaderSize = 14 + wsMaxControlPayloadSize = 125 + wsCloseSatusSize = 2 + + // From https://tools.ietf.org/html/rfc6455#section-11.7 + wsCloseStatusNormalClosure = 1000 + wsCloseStatusNoStatusReceived = 1005 + wsCloseStatusAbnormalClosure = 1006 + wsCloseStatusInvalidPayloadData = 1007 + + wsScheme = "ws" + wsSchemeTLS = "wss" + + wsPMCExtension = "permessage-deflate" // per-message compression + wsPMCSrvNoCtx = "server_no_context_takeover" + wsPMCCliNoCtx = "client_no_context_takeover" + wsPMCReqHeaderValue = wsPMCExtension + "; " + wsPMCSrvNoCtx + "; " + wsPMCCliNoCtx +) + +// From https://tools.ietf.org/html/rfc6455#section-1.3 +var wsGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +var compressFinalBlock = []byte{0x00, 0x00, 0xff, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff} + +type websocketReader struct { + r io.Reader + pending [][]byte + ib []byte + ff bool + fc bool + nl bool + dc *wsDecompressor + nc *Conn +} + +type wsDecompressor struct { + flate io.ReadCloser + bufs [][]byte + off int +} + +type websocketWriter struct { + w io.Writer + compress bool + compressor *flate.Writer + ctrlFrames [][]byte // pending frames that should be sent at the next Write() + cm []byte // close message that needs to be sent when everything else has been sent + cmDone bool // a close message has been added or sent (never going back to false) + noMoreSend bool // if true, even if there is a Write() call, we should not send anything +} + +func (d *wsDecompressor) Read(dst []byte) (int, error) { + if len(dst) == 0 { + return 0, nil + } + if len(d.bufs) == 0 { + return 0, io.EOF + } + copied := 0 + rem := len(dst) + for buf := d.bufs[0]; buf != nil && rem > 0; { + n := len(buf[d.off:]) + if n > rem { + n = rem + } + copy(dst[copied:], buf[d.off:d.off+n]) + copied += n + rem -= n + d.off += n + buf = d.nextBuf() + } + return copied, nil +} + +func (d *wsDecompressor) nextBuf() []byte { + // We still have remaining data in the first buffer + if d.off != len(d.bufs[0]) { + return d.bufs[0] + } + // We read the full first buffer. Reset offset. + d.off = 0 + // We were at the last buffer, so we are done. + if len(d.bufs) == 1 { + d.bufs = nil + return nil + } + // Here we move to the next buffer. 
+	d.bufs = d.bufs[1:]
+	return d.bufs[0]
+}
+
+func (d *wsDecompressor) ReadByte() (byte, error) {
+	if len(d.bufs) == 0 {
+		return 0, io.EOF
+	}
+	b := d.bufs[0][d.off]
+	d.off++
+	d.nextBuf()
+	return b, nil
+}
+
+func (d *wsDecompressor) addBuf(b []byte) {
+	d.bufs = append(d.bufs, b)
+}
+
+func (d *wsDecompressor) decompress() ([]byte, error) {
+	d.off = 0
+	// As per https://tools.ietf.org/html/rfc7692#section-7.2.2
+	// add 0x00, 0x00, 0xff, 0xff and then a final block so that flate reader
+	// does not report unexpected EOF.
+	d.bufs = append(d.bufs, compressFinalBlock)
+	// Create or reset the decompressor with this object (wsDecompressor)
+	// that provides Read() and ReadByte() APIs that will consume from
+	// the compressed buffers (d.bufs).
+	if d.flate == nil {
+		d.flate = flate.NewReader(d)
+	} else {
+		d.flate.(flate.Resetter).Reset(d, nil)
+	}
+	b, err := io.ReadAll(d.flate)
+	// Now reset the compressed buffers list
+	d.bufs = nil
+	return b, err
+}
+
+func wsNewReader(r io.Reader) *websocketReader {
+	return &websocketReader{r: r, ff: true}
+}
+
+// From now on, reads will be from the readLoop and we will need to
+// acquire the connection lock should we have to send/write a control
+// message from handleControlFrame.
+//
+// Note: this runs under the connection lock.
+func (r *websocketReader) doneWithConnect() {
+	r.nl = true
+}
+
+func (r *websocketReader) Read(p []byte) (int, error) {
+	var err error
+	var buf []byte
+
+	if l := len(r.ib); l > 0 {
+		buf = r.ib
+		r.ib = nil
+	} else {
+		if len(r.pending) > 0 {
+			return r.drainPending(p), nil
+		}
+
+		// Get some data from the underlying reader.
+		n, err := r.r.Read(p)
+		if err != nil {
+			return 0, err
+		}
+		buf = p[:n]
+	}
+
+	// Now parse this and decode frames. We will possibly read more to
+	// ensure that we get a full frame.
+	var (
+		tmpBuf []byte
+		pos    int
+		max    = len(buf)
+		rem    = 0
+	)
+	for pos < max {
+		b0 := buf[pos]
+		frameType := wsOpCode(b0 & 0xF)
+		final := b0&wsFinalBit != 0
+		compressed := b0&wsRsv1Bit != 0
+		pos++
+
+		tmpBuf, pos, err = wsGet(r.r, buf, pos, 1)
+		if err != nil {
+			return 0, err
+		}
+		b1 := tmpBuf[0]
+
+		// Store size in case it is <= 125
+		rem = int(b1 & 0x7F)
+
+		switch frameType {
+		case wsPingMessage, wsPongMessage, wsCloseMessage:
+			if rem > wsMaxControlPayloadSize {
+				return 0, fmt.Errorf(
+					"control frame length bigger than maximum allowed of %v bytes",
+					wsMaxControlPayloadSize)
+			}
+			if compressed {
+				return 0, errors.New("control frame should not be compressed")
+			}
+			if !final {
+				return 0, errors.New("control frame does not have final bit set")
+			}
+		case wsTextMessage, wsBinaryMessage:
+			if !r.ff {
+				return 0, errors.New("new message started before final frame for previous message was received")
+			}
+			r.ff = final
+			r.fc = compressed
+		case wsContinuationFrame:
+			// Compressed bit must only be set in the first frame
+			if r.ff || compressed {
+				return 0, errors.New("invalid continuation frame")
+			}
+			r.ff = final
+		default:
+			return 0, fmt.Errorf("unknown opcode %v", frameType)
+		}
+
+		// If the encoded size is <= 125, then `rem` is simply the remainder size of the
+		// frame. If it is 126, then the actual size is encoded as a uint16. For larger
+		// frames, `rem` will initially be 127 and the actual size is encoded as a uint64.
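+		// For example, a 300-byte payload is encoded with the 7-bit length
+		// set to 126, followed by the two bytes 0x01 0x2C (300, big-endian).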
+ switch rem { + case 126: + tmpBuf, pos, err = wsGet(r.r, buf, pos, 2) + if err != nil { + return 0, err + } + rem = int(binary.BigEndian.Uint16(tmpBuf)) + case 127: + tmpBuf, pos, err = wsGet(r.r, buf, pos, 8) + if err != nil { + return 0, err + } + rem = int(binary.BigEndian.Uint64(tmpBuf)) + } + + // Handle control messages in place... + if wsIsControlFrame(frameType) { + pos, err = r.handleControlFrame(frameType, buf, pos, rem) + if err != nil { + return 0, err + } + rem = 0 + continue + } + + var b []byte + // This ensures that we get the full payload for this frame. + b, pos, err = wsGet(r.r, buf, pos, rem) + if err != nil { + return 0, err + } + // We read the full frame. + rem = 0 + addToPending := true + if r.fc { + // Don't add to pending if we are not dealing with the final frame. + addToPending = r.ff + // Add the compressed payload buffer to the list. + r.addCBuf(b) + // Decompress only when this is the final frame. + if r.ff { + b, err = r.dc.decompress() + if err != nil { + return 0, err + } + r.fc = false + } + } + // Add to the pending list if dealing with uncompressed frames or + // after we have received the full compressed message and decompressed it. + if addToPending { + r.pending = append(r.pending, b) + } + } + // In case of compression, there may be nothing to drain + if len(r.pending) > 0 { + return r.drainPending(p), nil + } + return 0, nil +} + +func (r *websocketReader) addCBuf(b []byte) { + if r.dc == nil { + r.dc = &wsDecompressor{} + } + // Add a copy of the incoming buffer to the list of compressed buffers. + r.dc.addBuf(append([]byte(nil), b...)) +} + +func (r *websocketReader) drainPending(p []byte) int { + var n int + var max = len(p) + + for i, buf := range r.pending { + if n+len(buf) <= max { + copy(p[n:], buf) + n += len(buf) + } else { + // Is there room left? + if n < max { + // Write the partial and update this slice. + rem := max - n + copy(p[n:], buf[:rem]) + n += rem + r.pending[i] = buf[rem:] + } + // These are the remaining slices that will need to be used at + // the next Read() call. + r.pending = r.pending[i:] + return n + } + } + r.pending = r.pending[:0] + return n +} + +func wsGet(r io.Reader, buf []byte, pos, needed int) ([]byte, int, error) { + avail := len(buf) - pos + if avail >= needed { + return buf[pos : pos+needed], pos + needed, nil + } + b := make([]byte, needed) + start := copy(b, buf[pos:]) + for start != needed { + n, err := r.Read(b[start:cap(b)]) + start += n + if err != nil { + return b, start, err + } + } + return b, pos + avail, nil +} + +func (r *websocketReader) handleControlFrame(frameType wsOpCode, buf []byte, pos, rem int) (int, error) { + var payload []byte + var err error + + if rem > 0 { + payload, pos, err = wsGet(r.r, buf, pos, rem) + if err != nil { + return pos, err + } + } + switch frameType { + case wsCloseMessage: + status := wsCloseStatusNoStatusReceived + var body string + lp := len(payload) + // If there is a payload, the status is represented as a 2-byte + // unsigned integer (in network byte order). Then, there may be an + // optional body. + hasStatus, hasBody := lp >= wsCloseSatusSize, lp > wsCloseSatusSize + if hasStatus { + // Decode the status + status = int(binary.BigEndian.Uint16(payload[:wsCloseSatusSize])) + // Now if there is a body, capture it and make sure this is a valid UTF-8. 
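+			// (Illustrative: a close frame for a normal closure carries at
+			// least the two bytes 0x03 0xE8, i.e. status 1000, optionally
+			// followed by a UTF-8 reason.)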
+ if hasBody { + body = string(payload[wsCloseSatusSize:]) + if !utf8.ValidString(body) { + // https://tools.ietf.org/html/rfc6455#section-5.5.1 + // If body is present, it must be a valid utf8 + status = wsCloseStatusInvalidPayloadData + body = "invalid utf8 body in close frame" + } + } + } + r.nc.wsEnqueueCloseMsg(r.nl, status, body) + // Return io.EOF so that readLoop will close the connection as client closed + // after processing pending buffers. + return pos, io.EOF + case wsPingMessage: + r.nc.wsEnqueueControlMsg(r.nl, wsPongMessage, payload) + case wsPongMessage: + // Nothing to do.. + } + return pos, nil +} + +func (w *websocketWriter) Write(p []byte) (int, error) { + if w.noMoreSend { + return 0, nil + } + var total int + var n int + var err error + // If there are control frames, they can be sent now. Actually spec says + // that they should be sent ASAP, so we will send before any application data. + if len(w.ctrlFrames) > 0 { + n, err = w.writeCtrlFrames() + if err != nil { + return n, err + } + total += n + } + // Do the following only if there is something to send. + // We will end with checking for need to send close message. + if len(p) > 0 { + if w.compress { + buf := &bytes.Buffer{} + if w.compressor == nil { + w.compressor, _ = flate.NewWriter(buf, flate.BestSpeed) + } else { + w.compressor.Reset(buf) + } + if n, err = w.compressor.Write(p); err != nil { + return n, err + } + if err = w.compressor.Flush(); err != nil { + return n, err + } + b := buf.Bytes() + p = b[:len(b)-4] + } + fh, key := wsCreateFrameHeader(w.compress, wsBinaryMessage, len(p)) + wsMaskBuf(key, p) + n, err = w.w.Write(fh) + total += n + if err == nil { + n, err = w.w.Write(p) + total += n + } + } + if err == nil && w.cm != nil { + n, err = w.writeCloseMsg() + total += n + } + return total, err +} + +func (w *websocketWriter) writeCtrlFrames() (int, error) { + var ( + n int + total int + i int + err error + ) + for ; i < len(w.ctrlFrames); i++ { + buf := w.ctrlFrames[i] + n, err = w.w.Write(buf) + total += n + if err != nil { + break + } + } + if i != len(w.ctrlFrames) { + w.ctrlFrames = w.ctrlFrames[i+1:] + } else { + w.ctrlFrames = w.ctrlFrames[:0] + } + return total, err +} + +func (w *websocketWriter) writeCloseMsg() (int, error) { + n, err := w.w.Write(w.cm) + w.cm, w.noMoreSend = nil, true + return n, err +} + +func wsMaskBuf(key, buf []byte) { + for i := 0; i < len(buf); i++ { + buf[i] ^= key[i&3] + } +} + +// Create the frame header. +// Encodes the frame type and optional compression flag, and the size of the payload. 
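+//
+// Sketch of the header bytes produced for a client-to-server frame
+// (per https://tools.ietf.org/html/rfc6455#section-5.2):
+//
+//	byte 0:     FIN | RSV1 (set when compressed) | 4-bit opcode
+//	byte 1:     MASK | 7-bit length, or the marker 126 (uint16 follows)
+//	            or 127 (uint64 follows)
+//	next 0/2/8: extended payload length, big-endian
+//	next 4:     masking key (clients always mask)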
+func wsCreateFrameHeader(compressed bool, frameType wsOpCode, l int) ([]byte, []byte) { + fh := make([]byte, wsMaxFrameHeaderSize) + n, key := wsFillFrameHeader(fh, compressed, frameType, l) + return fh[:n], key +} + +func wsFillFrameHeader(fh []byte, compressed bool, frameType wsOpCode, l int) (int, []byte) { + var n int + b := byte(frameType) + b |= wsFinalBit + if compressed { + b |= wsRsv1Bit + } + b1 := byte(wsMaskBit) + switch { + case l <= 125: + n = 2 + fh[0] = b + fh[1] = b1 | byte(l) + case l < 65536: + n = 4 + fh[0] = b + fh[1] = b1 | 126 + binary.BigEndian.PutUint16(fh[2:], uint16(l)) + default: + n = 10 + fh[0] = b + fh[1] = b1 | 127 + binary.BigEndian.PutUint64(fh[2:], uint64(l)) + } + var key []byte + var keyBuf [4]byte + if _, err := io.ReadFull(rand.Reader, keyBuf[:4]); err != nil { + kv := mrand.Int31() + binary.LittleEndian.PutUint32(keyBuf[:4], uint32(kv)) + } + copy(fh[n:], keyBuf[:4]) + key = fh[n : n+4] + n += 4 + return n, key +} + +func (nc *Conn) wsInitHandshake(u *url.URL) error { + compress := nc.Opts.Compression + tlsRequired := u.Scheme == wsSchemeTLS || nc.Opts.Secure || nc.Opts.TLSConfig != nil || nc.Opts.TLSCertCB != nil || nc.Opts.RootCAsCB != nil + // Do TLS here as needed. + if tlsRequired { + if err := nc.makeTLSConn(); err != nil { + return err + } + } else { + nc.bindToNewConn() + } + + var err error + + // For http request, we need the passed URL to contain either http or https scheme. + scheme := "http" + if tlsRequired { + scheme = "https" + } + ustr := fmt.Sprintf("%s://%s", scheme, u.Host) + + if nc.Opts.ProxyPath != "" { + proxyPath := nc.Opts.ProxyPath + if !strings.HasPrefix(proxyPath, "/") { + proxyPath = "/" + proxyPath + } + ustr += proxyPath + } + + u, err = url.Parse(ustr) + if err != nil { + return err + } + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + wsKey, err := wsMakeChallengeKey() + if err != nil { + return err + } + + req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{wsKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if compress { + req.Header.Add("Sec-WebSocket-Extensions", wsPMCReqHeaderValue) + } + if err := req.Write(nc.conn); err != nil { + return err + } + + var resp *http.Response + + br := bufio.NewReaderSize(nc.conn, 4096) + nc.conn.SetReadDeadline(time.Now().Add(nc.Opts.Timeout)) + resp, err = http.ReadResponse(br, req) + if err == nil && + (resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != wsAcceptKey(wsKey)) { + + err = fmt.Errorf("invalid websocket connection") + } + // Check compression extension... + if err == nil && compress { + // Check that not only permessage-deflate extension is present, but that + // we also have server and client no context take over. + srvCompress, noCtxTakeover := wsPMCExtensionSupport(resp.Header) + + // If server does not support compression, then simply disable it in our side. 
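+		// Note (as implemented here): the client offered
+		// "permessage-deflate; server_no_context_takeover; client_no_context_takeover"
+		// and requires both no-context-takeover parameters to be agreed upon,
+		// since the flate state is reset for each message on both sides.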
+ if !srvCompress { + compress = false + } else if !noCtxTakeover { + err = fmt.Errorf("compression negotiation error") + } + } + if resp != nil { + resp.Body.Close() + } + nc.conn.SetReadDeadline(time.Time{}) + if err != nil { + return err + } + + wsr := wsNewReader(nc.br.r) + wsr.nc = nc + // We have to slurp whatever is in the bufio reader and copy to br.r + if n := br.Buffered(); n != 0 { + wsr.ib, _ = br.Peek(n) + } + nc.br.r = wsr + nc.bw.w = &websocketWriter{w: nc.bw.w, compress: compress} + nc.ws = true + return nil +} + +func (nc *Conn) wsClose() { + nc.mu.Lock() + defer nc.mu.Unlock() + if !nc.ws { + return + } + nc.wsEnqueueCloseMsgLocked(wsCloseStatusNormalClosure, _EMPTY_) +} + +func (nc *Conn) wsEnqueueCloseMsg(needsLock bool, status int, payload string) { + // In some low-level unit tests it will happen... + if nc == nil { + return + } + if needsLock { + nc.mu.Lock() + defer nc.mu.Unlock() + } + nc.wsEnqueueCloseMsgLocked(status, payload) +} + +func (nc *Conn) wsEnqueueCloseMsgLocked(status int, payload string) { + wr, ok := nc.bw.w.(*websocketWriter) + if !ok || wr.cmDone { + return + } + statusAndPayloadLen := 2 + len(payload) + frame := make([]byte, 2+4+statusAndPayloadLen) + n, key := wsFillFrameHeader(frame, false, wsCloseMessage, statusAndPayloadLen) + // Set the status + binary.BigEndian.PutUint16(frame[n:], uint16(status)) + // If there is a payload, copy + if len(payload) > 0 { + copy(frame[n+2:], payload) + } + // Mask status + payload + wsMaskBuf(key, frame[n:n+statusAndPayloadLen]) + wr.cm = frame + wr.cmDone = true + nc.bw.flush() + if c := wr.compressor; c != nil { + c.Close() + } +} + +func (nc *Conn) wsEnqueueControlMsg(needsLock bool, frameType wsOpCode, payload []byte) { + // In some low-level unit tests it will happen... + if nc == nil { + return + } + if needsLock { + nc.mu.Lock() + defer nc.mu.Unlock() + } + wr, ok := nc.bw.w.(*websocketWriter) + if !ok { + return + } + fh, key := wsCreateFrameHeader(false, frameType, len(payload)) + wr.ctrlFrames = append(wr.ctrlFrames, fh) + if len(payload) > 0 { + wsMaskBuf(key, payload) + wr.ctrlFrames = append(wr.ctrlFrames, payload) + } + nc.bw.flush() +} + +func wsPMCExtensionSupport(header http.Header) (bool, bool) { + for _, extensionList := range header["Sec-Websocket-Extensions"] { + extensions := strings.Split(extensionList, ",") + for _, extension := range extensions { + extension = strings.Trim(extension, " \t") + params := strings.Split(extension, ";") + for i, p := range params { + p = strings.Trim(p, " \t") + if strings.EqualFold(p, wsPMCExtension) { + var snc bool + var cnc bool + for j := i + 1; j < len(params); j++ { + p = params[j] + p = strings.Trim(p, " \t") + if strings.EqualFold(p, wsPMCSrvNoCtx) { + snc = true + } else if strings.EqualFold(p, wsPMCCliNoCtx) { + cnc = true + } + if snc && cnc { + return true, true + } + } + return true, false + } + } + } + } + return false, false +} + +func wsMakeChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +func wsAcceptKey(key string) string { + h := sha1.New() + h.Write([]byte(key)) + h.Write(wsGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +// Returns true if the op code corresponds to a control frame. 
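+// Per https://tools.ietf.org/html/rfc6455#section-5.5, control frames use
+// opcodes 8 and above (close=8, ping=9, pong=10), so comparing against
+// wsCloseMessage is sufficient here.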
+func wsIsControlFrame(frameType wsOpCode) bool { + return frameType >= wsCloseMessage +} + +func isWebsocketScheme(u *url.URL) bool { + return u.Scheme == wsScheme || u.Scheme == wsSchemeTLS +} diff --git a/vendor/github.com/nats-io/nkeys/.gitignore b/vendor/github.com/nats-io/nkeys/.gitignore new file mode 100644 index 00000000..d23676d2 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/.gitignore @@ -0,0 +1,16 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib +build/ + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ +.idea/ diff --git a/vendor/github.com/nats-io/nkeys/.goreleaser.yml b/vendor/github.com/nats-io/nkeys/.goreleaser.yml new file mode 100644 index 00000000..e5c4f154 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/.goreleaser.yml @@ -0,0 +1,63 @@ +project_name: nkeys +release: + github: + owner: nats-io + name: nkeys + name_template: '{{.Tag}}' + draft: true +builds: + - id: nk + main: ./nk/main.go + ldflags: "-X main.Version={{.Tag}}_{{.Commit}}" + binary: nk + goos: + - darwin + - linux + - windows + - freebsd + goarch: + - amd64 + - arm + - arm64 + - 386 + - mips64le + - s390x + goarm: + - 6 + - 7 + ignore: + - goos: darwin + goarch: 386 + - goos: freebsd + goarch: arm + - goos: freebsd + goarch: arm64 + - goos: freebsd + goarch: 386 + +dist: build + +archives: + - name_template: '{{ .ProjectName }}-v{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm + }}v{{ .Arm }}{{ end }}' + wrap_in_directory: true + format: zip + files: + - README.md + - LICENSE + +checksum: + name_template: '{{ .ProjectName }}-v{{ .Version }}-checksums.txt' + +snapshot: + name_template: 'dev' + +nfpms: + - file_name_template: '{{ .ProjectName }}-v{{ .Version }}-{{ .Arch }}{{ if .Arm + }}v{{ .Arm }}{{ end }}' + maintainer: nats.io + description: NKeys utility cli program + vendor: nats-io + bindir: /usr/local/bin + formats: + - deb \ No newline at end of file diff --git a/vendor/github.com/nats-io/nkeys/GOVERNANCE.md b/vendor/github.com/nats-io/nkeys/GOVERNANCE.md new file mode 100644 index 00000000..744d3bc2 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/GOVERNANCE.md @@ -0,0 +1,3 @@ +# NATS NKEYS Governance + +NATS NKEYS is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md). \ No newline at end of file diff --git a/vendor/github.com/nats-io/nkeys/LICENSE b/vendor/github.com/nats-io/nkeys/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/nats-io/nkeys/MAINTAINERS.md b/vendor/github.com/nats-io/nkeys/MAINTAINERS.md
new file mode 100644
index 00000000..23214655
--- /dev/null
+++ b/vendor/github.com/nats-io/nkeys/MAINTAINERS.md
@@ -0,0 +1,8 @@
+# Maintainers
+
+Maintainership is on a per-project basis.
+
+### Maintainers
+ - Derek Collison [@derekcollison](https://github.com/derekcollison)
+ - Ivan Kozlovic [@kozlovic](https://github.com/kozlovic)
+ - Waldemar Quevedo [@wallyqs](https://github.com/wallyqs)
diff --git a/vendor/github.com/nats-io/nkeys/README.md b/vendor/github.com/nats-io/nkeys/README.md
new file mode 100644
index 00000000..37febc9a
--- /dev/null
+++ b/vendor/github.com/nats-io/nkeys/README.md
@@ -0,0 +1,69 @@
+# NKEYS
+
+[![License Apache 2](https://img.shields.io/badge/License-Apache2-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0)
+[![Go Report Card](https://goreportcard.com/badge/github.com/nats-io/nkeys)](https://goreportcard.com/report/github.com/nats-io/nkeys)
+[![Build Status](https://app.travis-ci.com/nats-io/nkeys.svg?branch=master)](https://app.travis-ci.com/nats-io/nkeys)
+[![GoDoc](https://godoc.org/github.com/nats-io/nkeys?status.svg)](https://godoc.org/github.com/nats-io/nkeys)
+[![Coverage Status](https://coveralls.io/repos/github/nats-io/nkeys/badge.svg?branch=master&service=github)](https://coveralls.io/github/nats-io/nkeys?branch=master)
+
+A public-key signature system based on [Ed25519](https://ed25519.cr.yp.to/) for the NATS ecosystem.
+
+## About
+
+The NATS ecosystem will be moving to [Ed25519](https://ed25519.cr.yp.to/) keys for identity, authentication and authorization for entities such as Accounts, Users, Servers and Clusters.
+
+Ed25519 is fast and resistant to side-channel attacks. Only the seed needs to be stored and kept safe, as the seed can regenerate both the public and private keys.
+
+The NATS system will utilize Ed25519 keys, meaning that NATS systems will never store or even have access to any private keys. Authentication will utilize a random challenge-response mechanism.
+
+Dealing with 32 byte and 64 byte raw keys can be challenging. NKEYS is designed to formulate keys in a much friendlier fashion and references work done in cryptocurrencies, specifically [Stellar](https://www.stellar.org/). Bitcoin and others used a form of Base58 (or Base58Check) to encode raw keys. Stellar utilized a more traditional Base32 with a CRC16 and a version or prefix byte. NKEYS utilizes a similar format where the prefix will be 1 byte for public and private keys and will be 2 bytes for seeds. The base32 encoding of these prefixes will yield friendly, human-readable prefixes, e.g. '**N**' = server, '**C**' = cluster, '**O**' = operator, '**A**' = account, and '**U**' = user. '**P**' is used for private keys. For seeds, the first encoded prefix is '**S**', and the second character will be the type for the public key, e.g. "**SU**" is a seed for a user key pair, "**SA**" is a seed for an account key pair.
+
+## Installation
+
+Use the `go` command:
+
+	$ go get github.com/nats-io/nkeys
+
+## nk - Command Line Utility
+
+Located under the nk [directory](https://github.com/nats-io/nkeys/tree/master/nk).
+
+## Basic API Usage
+```go
+
+// Create a new User KeyPair
+user, _ := nkeys.CreateUser()
+
+// Sign some data with a full key pair user.
+data := []byte("Hello World")
+sig, _ := user.Sign(data)
+
+// Verify the signature.
+err := user.Verify(data, sig)
+
+// Access the seed, the only thing that needs to be stored and kept safe.
+// seed = "SUAKYRHVIOREXV7EUZTBHUHL7NUMHPMAS7QMDU3GTIUWEI5LDNOXD43IZY"
+seed, _ := user.Seed()
+
+// Access the public key which can be shared.
+// publicKey = "UD466L6EBCM3YY5HEGHJANNTN4LSKTSUXTH7RILHCKEQMQHTBNLHJJXT"
+publicKey, _ := user.PublicKey()
+
+// Create a full User who can sign and verify from a private seed.
+user, _ = nkeys.FromSeed(seed)
+
+// Create a User who can only verify signatures via a public key.
+user, _ = nkeys.FromPublicKey(publicKey)
+
+// Create a User KeyPair with our own random data.
+var rawSeed [32]byte
+_, err = io.ReadFull(rand.Reader, rawSeed[:]) // Or some other random source.
+user2, _ := nkeys.FromRawSeed(nkeys.PrefixByteUser, rawSeed[:])
+
+```
+
+## License
+
+Unless otherwise noted, the NATS source files are distributed
+under the Apache Version 2.0 license found in the LICENSE file.
+
diff --git a/vendor/github.com/nats-io/nkeys/TODO.md b/vendor/github.com/nats-io/nkeys/TODO.md
new file mode 100644
index 00000000..2649c9e5
--- /dev/null
+++ b/vendor/github.com/nats-io/nkeys/TODO.md
@@ -0,0 +1,5 @@
+
+# General
+
+- [ ] Child key derivation
+- [ ] Hardware support, e.g. YubiHSM
diff --git a/vendor/github.com/nats-io/nkeys/crc16.go b/vendor/github.com/nats-io/nkeys/crc16.go
new file mode 100644
index 00000000..fbe38fbc
--- /dev/null
+++ b/vendor/github.com/nats-io/nkeys/crc16.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nkeys
+
+// An implementation of crc16 according to CCITT standards for XMODEM.
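+// This is CRC-16/XMODEM: polynomial 0x1021, initial value 0x0000, no input
+// or output reflection, no final XOR. For reference, the standard check
+// value crc16([]byte("123456789")) is 0x31C3.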
+ +var crc16tab = [256]uint16{ + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, +} + +// crc16 returns the 2-byte crc for the data provided. +func crc16(data []byte) uint16 { + var crc uint16 + for _, b := range data { + crc = ((crc << 8) & 0xffff) ^ crc16tab[((crc>>8)^uint16(b))&0x00FF] + } + return crc +} + +// validate will check the calculated crc16 checksum for data against the expected. +func validate(data []byte, expected uint16) error { + if crc16(data) != expected { + return ErrInvalidChecksum + } + return nil +} diff --git a/vendor/github.com/nats-io/nkeys/creds_utils.go b/vendor/github.com/nats-io/nkeys/creds_utils.go new file mode 100644 index 00000000..ecd94631 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/creds_utils.go @@ -0,0 +1,78 @@ +package nkeys + +import ( + "bytes" + "regexp" + "strings" +) + +var userConfigRE = regexp.MustCompile(`\s*(?:(?:[-]{3,}.*[-]{3,}\r?\n)([\w\-.=]+)(?:\r?\n[-]{3,}.*[-]{3,}\r?\n))`) + +// ParseDecoratedJWT takes a creds file and returns the JWT portion. +func ParseDecoratedJWT(contents []byte) (string, error) { + items := userConfigRE.FindAllSubmatch(contents, -1) + if len(items) == 0 { + return string(contents), nil + } + // First result should be the user JWT. + // We copy here so that if the file contained a seed file too we wipe appropriately. 
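+	// (Illustrative sketch of the decorated layout the regexp above targets;
+	// the payload values here are truncated placeholders:
+	//
+	//	-----BEGIN NATS USER JWT-----
+	//	eyJ0eXAiOiJKV1Qi...
+	//	------END NATS USER JWT------
+	//
+	//	-----BEGIN USER NKEY SEED-----
+	//	SUA...
+	//	------END USER NKEY SEED------
+	//
+	// so items[0] is the JWT block and items[1], when present, is the seed.)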
+ raw := items[0][1] + tmp := make([]byte, len(raw)) + copy(tmp, raw) + return strings.TrimSpace(string(tmp)), nil +} + +// ParseDecoratedNKey takes a creds file, finds the NKey portion and creates a +// key pair from it. +func ParseDecoratedNKey(contents []byte) (KeyPair, error) { + var seed []byte + + items := userConfigRE.FindAllSubmatch(contents, -1) + if len(items) > 1 { + seed = items[1][1] + } else { + lines := bytes.Split(contents, []byte("\n")) + for _, line := range lines { + if bytes.HasPrefix(bytes.TrimSpace(line), []byte("SO")) || + bytes.HasPrefix(bytes.TrimSpace(line), []byte("SA")) || + bytes.HasPrefix(bytes.TrimSpace(line), []byte("SU")) { + seed = line + break + } + } + } + if seed == nil { + return nil, ErrNoSeedFound + } + if !bytes.HasPrefix(seed, []byte("SO")) && + !bytes.HasPrefix(seed, []byte("SA")) && + !bytes.HasPrefix(seed, []byte("SU")) { + return nil, ErrInvalidNkeySeed + } + kp, err := FromSeed(seed) + if err != nil { + return nil, err + } + return kp, nil +} + +// ParseDecoratedUserNKey takes a creds file, finds the NKey portion and creates a +// key pair from it. Similar to ParseDecoratedNKey but fails for non-user keys. +func ParseDecoratedUserNKey(contents []byte) (KeyPair, error) { + nk, err := ParseDecoratedNKey(contents) + if err != nil { + return nil, err + } + seed, err := nk.Seed() + if err != nil { + return nil, err + } + if !bytes.HasPrefix(seed, []byte("SU")) { + return nil, ErrInvalidUserSeed + } + kp, err := FromSeed(seed) + if err != nil { + return nil, err + } + return kp, nil +} diff --git a/vendor/github.com/nats-io/nkeys/dependencies.md b/vendor/github.com/nats-io/nkeys/dependencies.md new file mode 100644 index 00000000..370184aa --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/dependencies.md @@ -0,0 +1,12 @@ +# External Dependencies + +This file lists the dependencies used in this repository. + +| Dependency | License | +|-|-| +| Go | BSD 3-Clause "New" or "Revised" License | +| golang.org/x/crypto v0.3.0 | BSD 3-Clause "New" or "Revised" License | +| golang.org/x/net v0.2.0 | BSD 3-Clause "New" or "Revised" License | +| golang.org/x/sys v0.2.0 | BSD 3-Clause "New" or "Revised" License | +| golang.org/x/term v0.2.0 | BSD 3-Clause "New" or "Revised" License | +| golang.org/x/text v0.4.0 | BSD 3-Clause "New" or "Revised" License | diff --git a/vendor/github.com/nats-io/nkeys/errors.go b/vendor/github.com/nats-io/nkeys/errors.go new file mode 100644 index 00000000..a30bb96e --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/errors.go @@ -0,0 +1,50 @@ +// Copyright 2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package nkeys
+
+// Errors
+const (
+	ErrInvalidPrefixByte        = nkeysError("nkeys: invalid prefix byte")
+	ErrInvalidKey               = nkeysError("nkeys: invalid key")
+	ErrInvalidPublicKey         = nkeysError("nkeys: invalid public key")
+	ErrInvalidPrivateKey        = nkeysError("nkeys: invalid private key")
+	ErrInvalidSeedLen           = nkeysError("nkeys: invalid seed length")
+	ErrInvalidSeed              = nkeysError("nkeys: invalid seed")
+	ErrInvalidEncoding          = nkeysError("nkeys: invalid encoded key")
+	ErrInvalidSignature         = nkeysError("nkeys: signature verification failed")
+	ErrCannotSign               = nkeysError("nkeys: can not sign, no private key available")
+	ErrPublicKeyOnly            = nkeysError("nkeys: no seed or private key available")
+	ErrIncompatibleKey          = nkeysError("nkeys: incompatible key")
+	ErrInvalidChecksum          = nkeysError("nkeys: invalid checksum")
+	ErrNoSeedFound              = nkeysError("nkeys: no nkey seed found")
+	ErrInvalidNkeySeed          = nkeysError("nkeys: doesn't contain a seed nkey")
+	ErrInvalidUserSeed          = nkeysError("nkeys: doesn't contain a user seed nkey")
+	ErrInvalidRecipient         = nkeysError("nkeys: not a valid recipient public curve key")
+	ErrInvalidSender            = nkeysError("nkeys: not a valid sender public curve key")
+	ErrInvalidCurveKey          = nkeysError("nkeys: not a valid curve key")
+	ErrInvalidCurveSeed         = nkeysError("nkeys: not a valid curve seed")
+	ErrInvalidEncrypted         = nkeysError("nkeys: encrypted input is not valid")
+	ErrInvalidEncVersion        = nkeysError("nkeys: encrypted input wrong version")
+	ErrCouldNotDecrypt          = nkeysError("nkeys: could not decrypt input")
+	ErrInvalidCurveKeyOperation = nkeysError("nkeys: curve key is not valid for sign/verify")
+	ErrInvalidNKeyOperation     = nkeysError("nkeys: only curve key can seal/open")
+	ErrCannotOpen               = nkeysError("nkeys: cannot open no private curve key available")
+	ErrCannotSeal               = nkeysError("nkeys: cannot seal no private curve key available")
+)
+
+type nkeysError string
+
+func (e nkeysError) Error() string {
+	return string(e)
+}
diff --git a/vendor/github.com/nats-io/nkeys/keypair.go b/vendor/github.com/nats-io/nkeys/keypair.go
new file mode 100644
index 00000000..9d055180
--- /dev/null
+++ b/vendor/github.com/nats-io/nkeys/keypair.go
@@ -0,0 +1,146 @@
+// Copyright 2018-2022 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nkeys
+
+import (
+	"bytes"
+	"crypto/rand"
+	"io"
+
+	"golang.org/x/crypto/ed25519"
+)
+
+// kp is the internal struct for a keypair using seed.
+type kp struct {
+	seed []byte
+}
+
+// All seeds are 32 bytes long.
+const seedLen = 32
+
+// CreatePair will create a KeyPair based on the rand entropy and a type/prefix byte.
+func CreatePair(prefix PrefixByte) (KeyPair, error) {
+	return CreatePairWithRand(prefix, rand.Reader)
+}
+
+// CreatePairWithRand will create a KeyPair based on the rand reader and a type/prefix byte. rr can be nil.
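+//
+// Usage sketch (illustrative; entropy is a hypothetical io.Reader that must
+// yield at least 32 bytes, e.g. a deterministic source in tests):
+//
+//	kp, err := CreatePairWithRand(PrefixByteUser, entropy)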
+func CreatePairWithRand(prefix PrefixByte, rr io.Reader) (KeyPair, error) { + if prefix == PrefixByteCurve { + return CreateCurveKeysWithRand(rr) + } + if rr == nil { + rr = rand.Reader + } + var rawSeed [seedLen]byte + + _, err := io.ReadFull(rr, rawSeed[:]) + if err != nil { + return nil, err + } + + seed, err := EncodeSeed(prefix, rawSeed[:]) + if err != nil { + return nil, err + } + return &kp{seed}, nil +} + +// rawSeed will return the raw, decoded 64 byte seed. +func (pair *kp) rawSeed() ([]byte, error) { + _, raw, err := DecodeSeed(pair.seed) + return raw, err +} + +// keys will return a 32 byte public key and a 64 byte private key utilizing the seed. +func (pair *kp) keys() (ed25519.PublicKey, ed25519.PrivateKey, error) { + raw, err := pair.rawSeed() + if err != nil { + return nil, nil, err + } + return ed25519.GenerateKey(bytes.NewReader(raw)) +} + +// Wipe will randomize the contents of the seed key +func (pair *kp) Wipe() { + io.ReadFull(rand.Reader, pair.seed) + pair.seed = nil +} + +// Seed will return the encoded seed. +func (pair *kp) Seed() ([]byte, error) { + return pair.seed, nil +} + +// PublicKey will return the encoded public key associated with the KeyPair. +// All KeyPairs have a public key. +func (pair *kp) PublicKey() (string, error) { + public, raw, err := DecodeSeed(pair.seed) + if err != nil { + return "", err + } + pub, _, err := ed25519.GenerateKey(bytes.NewReader(raw)) + if err != nil { + return "", err + } + pk, err := Encode(public, pub) + if err != nil { + return "", err + } + return string(pk), nil +} + +// PrivateKey will return the encoded private key for KeyPair. +func (pair *kp) PrivateKey() ([]byte, error) { + _, priv, err := pair.keys() + if err != nil { + return nil, err + } + return Encode(PrefixBytePrivate, priv) +} + +// Sign will sign the input with KeyPair's private key. +func (pair *kp) Sign(input []byte) ([]byte, error) { + _, priv, err := pair.keys() + if err != nil { + return nil, err + } + return ed25519.Sign(priv, input), nil +} + +// Verify will verify the input against a signature utilizing the public key. +func (pair *kp) Verify(input []byte, sig []byte) error { + pub, _, err := pair.keys() + if err != nil { + return err + } + if !ed25519.Verify(pub, input, sig) { + return ErrInvalidSignature + } + return nil +} + +// Seal is only supported on CurveKeyPair +func (pair *kp) Seal(input []byte, recipient string) ([]byte, error) { + return nil, ErrInvalidNKeyOperation +} + +// SealWithRand is only supported on CurveKeyPair +func (pair *kp) SealWithRand(input []byte, recipient string, rr io.Reader) ([]byte, error) { + return nil, ErrInvalidNKeyOperation +} + +// Open is only supported on CurveKey +func (pair *kp) Open(input []byte, sender string) ([]byte, error) { + return nil, ErrInvalidNKeyOperation +} diff --git a/vendor/github.com/nats-io/nkeys/nkeys.go b/vendor/github.com/nats-io/nkeys/nkeys.go new file mode 100644 index 00000000..0db0f0c1 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/nkeys.go @@ -0,0 +1,100 @@ +// Copyright 2018-2019 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package nkeys is an Ed25519 based public-key signature system that simplifies keys and seeds +// and performs signing and verification. +// It also supports encryption via x25519 keys and is compatible with https://pkg.go.dev/golang.org/x/crypto/nacl/box. +package nkeys + +import "io" + +// Version is our current version +const Version = "0.4.6" + +// KeyPair provides the central interface to nkeys. +type KeyPair interface { + Seed() ([]byte, error) + PublicKey() (string, error) + PrivateKey() ([]byte, error) + // Sign is only supported on Non CurveKeyPairs + Sign(input []byte) ([]byte, error) + // Verify is only supported on Non CurveKeyPairs + Verify(input []byte, sig []byte) error + Wipe() + // Seal is only supported on CurveKeyPair + Seal(input []byte, recipient string) ([]byte, error) + // SealWithRand is only supported on CurveKeyPair + SealWithRand(input []byte, recipient string, rr io.Reader) ([]byte, error) + // Open is only supported on CurveKey + Open(input []byte, sender string) ([]byte, error) +} + +// CreateUser will create a User typed KeyPair. +func CreateUser() (KeyPair, error) { + return CreatePair(PrefixByteUser) +} + +// CreateAccount will create an Account typed KeyPair. +func CreateAccount() (KeyPair, error) { + return CreatePair(PrefixByteAccount) +} + +// CreateServer will create a Server typed KeyPair. +func CreateServer() (KeyPair, error) { + return CreatePair(PrefixByteServer) +} + +// CreateCluster will create a Cluster typed KeyPair. +func CreateCluster() (KeyPair, error) { + return CreatePair(PrefixByteCluster) +} + +// CreateOperator will create an Operator typed KeyPair. +func CreateOperator() (KeyPair, error) { + return CreatePair(PrefixByteOperator) +} + +// FromPublicKey will create a KeyPair capable of verifying signatures. +func FromPublicKey(public string) (KeyPair, error) { + raw, err := decode([]byte(public)) + if err != nil { + return nil, err + } + pre := PrefixByte(raw[0]) + if err := checkValidPublicPrefixByte(pre); err != nil { + return nil, ErrInvalidPublicKey + } + return &pub{pre, raw[1:]}, nil +} + +// FromSeed will create a KeyPair capable of signing and verifying signatures. +func FromSeed(seed []byte) (KeyPair, error) { + prefix, _, err := DecodeSeed(seed) + if err != nil { + return nil, err + } + if prefix == PrefixByteCurve { + return FromCurveSeed(seed) + } + copy := append([]byte{}, seed...) + return &kp{copy}, nil +} + +// FromRawSeed will create a KeyPair from the raw 32 byte seed for a given type. +func FromRawSeed(prefix PrefixByte, rawSeed []byte) (KeyPair, error) { + seed, err := EncodeSeed(prefix, rawSeed) + if err != nil { + return nil, err + } + return &kp{seed}, nil +} diff --git a/vendor/github.com/nats-io/nkeys/public.go b/vendor/github.com/nats-io/nkeys/public.go new file mode 100644 index 00000000..c3cd21ed --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/public.go @@ -0,0 +1,86 @@ +// Copyright 2018 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package nkeys + +import ( + "crypto/rand" + "io" + + "golang.org/x/crypto/ed25519" +) + +// A KeyPair from a public key capable of verifying only. +type pub struct { + pre PrefixByte + pub ed25519.PublicKey +} + +// PublicKey will return the encoded public key associated with the KeyPair. +// All KeyPairs have a public key. +func (p *pub) PublicKey() (string, error) { + pk, err := Encode(p.pre, p.pub) + if err != nil { + return "", err + } + return string(pk), nil +} + +// Seed will return an error since this is not available for public key only KeyPairs. +func (p *pub) Seed() ([]byte, error) { + return nil, ErrPublicKeyOnly +} + +// PrivateKey will return an error since this is not available for public key only KeyPairs. +func (p *pub) PrivateKey() ([]byte, error) { + return nil, ErrPublicKeyOnly +} + +// Sign will return an error since this is not available for public key only KeyPairs. +func (p *pub) Sign(input []byte) ([]byte, error) { + return nil, ErrCannotSign +} + +// Verify will verify the input against a signature utilizing the public key. +func (p *pub) Verify(input []byte, sig []byte) error { + if !ed25519.Verify(p.pub, input, sig) { + return ErrInvalidSignature + } + return nil +} + +// Wipe will randomize the public key and erase the pre byte. +func (p *pub) Wipe() { + p.pre = '0' + io.ReadFull(rand.Reader, p.pub) +} + +func (p *pub) Seal(input []byte, recipient string) ([]byte, error) { + if p.pre == PrefixByteCurve { + return nil, ErrCannotSeal + } + return nil, ErrInvalidNKeyOperation +} +func (p *pub) SealWithRand(input []byte, _recipient string, rr io.Reader) ([]byte, error) { + if p.pre == PrefixByteCurve { + return nil, ErrCannotSeal + } + return nil, ErrInvalidNKeyOperation +} + +func (p *pub) Open(input []byte, sender string) ([]byte, error) { + if p.pre == PrefixByteCurve { + return nil, ErrCannotOpen + } + return nil, ErrInvalidNKeyOperation +} diff --git a/vendor/github.com/nats-io/nkeys/strkey.go b/vendor/github.com/nats-io/nkeys/strkey.go new file mode 100644 index 00000000..8ae33116 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/strkey.go @@ -0,0 +1,314 @@ +// Copyright 2018-2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nkeys + +import ( + "bytes" + "encoding/base32" + "encoding/binary" +) + +// PrefixByte is a lead byte representing the type. +type PrefixByte byte + +const ( + // PrefixByteSeed is the version byte used for encoded NATS Seeds + PrefixByteSeed PrefixByte = 18 << 3 // Base32-encodes to 'S...' + + // PrefixBytePrivate is the version byte used for encoded NATS Private keys + PrefixBytePrivate PrefixByte = 15 << 3 // Base32-encodes to 'P...' + + // PrefixByteServer is the version byte used for encoded NATS Servers + PrefixByteServer PrefixByte = 13 << 3 // Base32-encodes to 'N...' 
+ + // PrefixByteCluster is the version byte used for encoded NATS Clusters + PrefixByteCluster PrefixByte = 2 << 3 // Base32-encodes to 'C...' + + // PrefixByteOperator is the version byte used for encoded NATS Operators + PrefixByteOperator PrefixByte = 14 << 3 // Base32-encodes to 'O...' + + // PrefixByteAccount is the version byte used for encoded NATS Accounts + PrefixByteAccount PrefixByte = 0 // Base32-encodes to 'A...' + + // PrefixByteUser is the version byte used for encoded NATS Users + PrefixByteUser PrefixByte = 20 << 3 // Base32-encodes to 'U...' + + // PrefixByteCurve is the version byte used for encoded CurveKeys (X25519) + PrefixByteCurve PrefixByte = 23 << 3 // Base32-encodes to 'X...' + + // PrefixByteUnknown is for unknown prefixes. + PrefixByteUnknown PrefixByte = 25 << 3 // Base32-encodes to 'Z...' +) + +// Set our encoding to not include padding '==' +var b32Enc = base32.StdEncoding.WithPadding(base32.NoPadding) + +// Encode will encode a raw key or seed with the prefix and crc16 and then base32 encoded. +func Encode(prefix PrefixByte, src []byte) ([]byte, error) { + if err := checkValidPrefixByte(prefix); err != nil { + return nil, err + } + + var raw bytes.Buffer + + // write prefix byte + if err := raw.WriteByte(byte(prefix)); err != nil { + return nil, err + } + + // write payload + if _, err := raw.Write(src); err != nil { + return nil, err + } + + // Calculate and write crc16 checksum + err := binary.Write(&raw, binary.LittleEndian, crc16(raw.Bytes())) + if err != nil { + return nil, err + } + + data := raw.Bytes() + buf := make([]byte, b32Enc.EncodedLen(len(data))) + b32Enc.Encode(buf, data) + return buf[:], nil +} + +// EncodeSeed will encode a raw key with the prefix and then seed prefix and crc16 and then base32 encoded. +// `src` must be 32 bytes long (ed25519.SeedSize). +func EncodeSeed(public PrefixByte, src []byte) ([]byte, error) { + if err := checkValidPublicPrefixByte(public); err != nil { + return nil, err + } + + if len(src) != seedLen { + return nil, ErrInvalidSeedLen + } + + // In order to make this human printable for both bytes, we need to do a little + // bit manipulation to setup for base32 encoding which takes 5 bits at a time. + b1 := byte(PrefixByteSeed) | (byte(public) >> 5) + b2 := (byte(public) & 31) << 3 // 31 = 00011111 + + var raw bytes.Buffer + + raw.WriteByte(b1) + raw.WriteByte(b2) + + // write payload + if _, err := raw.Write(src); err != nil { + return nil, err + } + + // Calculate and write crc16 checksum + err := binary.Write(&raw, binary.LittleEndian, crc16(raw.Bytes())) + if err != nil { + return nil, err + } + + data := raw.Bytes() + buf := make([]byte, b32Enc.EncodedLen(len(data))) + b32Enc.Encode(buf, data) + return buf, nil +} + +// IsValidEncoding will tell you if the encoding is a valid key. +func IsValidEncoding(src []byte) bool { + _, err := decode(src) + return err == nil +} + +// decode will decode the base32 and check crc16 and the prefix for validity. +func decode(src []byte) ([]byte, error) { + raw := make([]byte, b32Enc.DecodedLen(len(src))) + n, err := b32Enc.Decode(raw, src) + if err != nil { + return nil, err + } + raw = raw[:n] + + if n < 4 { + return nil, ErrInvalidEncoding + } + + crc := binary.LittleEndian.Uint16(raw[n-2:]) + + // ensure checksum is valid + if err := validate(raw[0:n-2], crc); err != nil { + return nil, err + } + + return raw[:n-2], nil +} + +// Decode will decode the base32 string and check crc16 and enforce the prefix is what is expected. 
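+//
+// A minimal usage sketch (illustrative; encodedUserKey is a placeholder for a
+// "U"-prefixed public key string, not a real key):
+//
+//	raw, err := Decode(PrefixByteUser, []byte(encodedUserKey))
+//	if err != nil {
+//		// not a valid encoded user public key
+//	}
+//	// raw holds the key bytes with the prefix byte and crc16 stripped.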
+func Decode(expectedPrefix PrefixByte, src []byte) ([]byte, error) { + if err := checkValidPrefixByte(expectedPrefix); err != nil { + return nil, err + } + raw, err := decode(src) + if err != nil { + return nil, err + } + b1 := raw[0] & 248 // 248 = 11111000 + if prefix := PrefixByte(b1); prefix != expectedPrefix { + return nil, ErrInvalidPrefixByte + } + return raw[1:], nil +} + +// DecodeSeed will decode the base32 string and check crc16 and enforce the prefix is a seed +// and the subsequent type is a valid type. +func DecodeSeed(src []byte) (PrefixByte, []byte, error) { + raw, err := decode(src) + if err != nil { + return PrefixByteSeed, nil, err + } + // Need to do the reverse here to get back to internal representation. + b1 := raw[0] & 248 // 248 = 11111000 + b2 := (raw[0]&7)<<5 | ((raw[1] & 248) >> 3) // 7 = 00000111 + + if PrefixByte(b1) != PrefixByteSeed { + return PrefixByteSeed, nil, ErrInvalidSeed + } + if checkValidPublicPrefixByte(PrefixByte(b2)) != nil { + return PrefixByteSeed, nil, ErrInvalidSeed + } + return PrefixByte(b2), raw[2:], nil +} + +// Prefix returns PrefixBytes of its input +func Prefix(src string) PrefixByte { + b, err := decode([]byte(src)) + if err != nil { + return PrefixByteUnknown + } + prefix := PrefixByte(b[0]) + err = checkValidPrefixByte(prefix) + if err == nil { + return prefix + } + // Might be a seed. + b1 := b[0] & 248 + if PrefixByte(b1) == PrefixByteSeed { + return PrefixByteSeed + } + return PrefixByteUnknown +} + +// IsValidPublicKey will decode and verify that the string is a valid encoded public key. +func IsValidPublicKey(src string) bool { + b, err := decode([]byte(src)) + if err != nil { + return false + } + if prefix := PrefixByte(b[0]); checkValidPublicPrefixByte(prefix) != nil { + return false + } + return true +} + +// IsValidPublicUserKey will decode and verify the string is a valid encoded Public User Key. +func IsValidPublicUserKey(src string) bool { + _, err := Decode(PrefixByteUser, []byte(src)) + return err == nil +} + +// IsValidPublicAccountKey will decode and verify the string is a valid encoded Public Account Key. +func IsValidPublicAccountKey(src string) bool { + _, err := Decode(PrefixByteAccount, []byte(src)) + return err == nil +} + +// IsValidPublicServerKey will decode and verify the string is a valid encoded Public Server Key. +func IsValidPublicServerKey(src string) bool { + _, err := Decode(PrefixByteServer, []byte(src)) + return err == nil +} + +// IsValidPublicClusterKey will decode and verify the string is a valid encoded Public Cluster Key. +func IsValidPublicClusterKey(src string) bool { + _, err := Decode(PrefixByteCluster, []byte(src)) + return err == nil +} + +// IsValidPublicOperatorKey will decode and verify the string is a valid encoded Public Operator Key. +func IsValidPublicOperatorKey(src string) bool { + _, err := Decode(PrefixByteOperator, []byte(src)) + return err == nil +} + +// IsValidPublicCurveKey will decode and verify the string is a valid encoded Public Curve Key. +func IsValidPublicCurveKey(src string) bool { + _, err := Decode(PrefixByteCurve, []byte(src)) + return err == nil +} + +// checkValidPrefixByte returns an error if the provided value +// is not one of the defined valid prefix byte constants. 
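+// Seed and private prefixes are accepted here; checkValidPublicPrefixByte
+// below is the stricter variant that rejects them.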
+func checkValidPrefixByte(prefix PrefixByte) error { + switch prefix { + case PrefixByteOperator, PrefixByteServer, PrefixByteCluster, + PrefixByteAccount, PrefixByteUser, PrefixByteSeed, PrefixBytePrivate, PrefixByteCurve: + return nil + } + return ErrInvalidPrefixByte +} + +// checkValidPublicPrefixByte returns an error if the provided value +// is not one of the public defined valid prefix byte constants. +func checkValidPublicPrefixByte(prefix PrefixByte) error { + switch prefix { + case PrefixByteOperator, PrefixByteServer, PrefixByteCluster, PrefixByteAccount, PrefixByteUser, PrefixByteCurve: + return nil + } + return ErrInvalidPrefixByte +} + +func (p PrefixByte) String() string { + switch p { + case PrefixByteOperator: + return "operator" + case PrefixByteServer: + return "server" + case PrefixByteCluster: + return "cluster" + case PrefixByteAccount: + return "account" + case PrefixByteUser: + return "user" + case PrefixByteSeed: + return "seed" + case PrefixBytePrivate: + return "private" + case PrefixByteCurve: + return "x25519" + } + return "unknown" +} + +// CompatibleKeyPair returns an error if the KeyPair doesn't match expected PrefixByte(s) +func CompatibleKeyPair(kp KeyPair, expected ...PrefixByte) error { + pk, err := kp.PublicKey() + if err != nil { + return err + } + pkType := Prefix(pk) + for _, k := range expected { + if pkType == k { + return nil + } + } + + return ErrIncompatibleKey +} diff --git a/vendor/github.com/nats-io/nkeys/xkeys.go b/vendor/github.com/nats-io/nkeys/xkeys.go new file mode 100644 index 00000000..78f8b99e --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/xkeys.go @@ -0,0 +1,185 @@ +// Copyright 2022-2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nkeys + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "io" + + "golang.org/x/crypto/curve25519" + "golang.org/x/crypto/nacl/box" +) + +// This package will support safe use of X25519 keys for asymmetric encryption. +// We will be compatible with nacl.Box, but generate random nonces automatically. +// We may add more advanced options in the future for group recipients and better +// end to end algorithms. + +const ( + curveKeyLen = 32 + curveDecodeLen = 35 + curveNonceLen = 24 +) + +type ckp struct { + seed [curveKeyLen]byte // Private raw key. +} + +// CreateCurveKeys will create a Curve typed KeyPair. +func CreateCurveKeys() (KeyPair, error) { + return CreateCurveKeysWithRand(rand.Reader) +} + +// CreateCurveKeysWithRand will create a Curve typed KeyPair +// with specified rand source. +func CreateCurveKeysWithRand(rr io.Reader) (KeyPair, error) { + var kp ckp + _, err := io.ReadFull(rr, kp.seed[:]) + if err != nil { + return nil, err + } + return &kp, nil +} + +// Will create a curve key pair from seed. 
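+//
+// Round-trip sketch (illustrative; errors elided):
+//
+//	kp, _ := CreateCurveKeys()
+//	seed, _ := kp.Seed()               // "SX"-prefixed encoded seed
+//	restored, _ := FromCurveSeed(seed) // equivalent key pair to kp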
+func FromCurveSeed(seed []byte) (KeyPair, error) { + pb, raw, err := DecodeSeed(seed) + if err != nil { + return nil, err + } + if pb != PrefixByteCurve || len(raw) != curveKeyLen { + return nil, ErrInvalidCurveSeed + } + var kp ckp + copy(kp.seed[:], raw) + return &kp, nil +} + +// Seed will return the encoded seed. +func (pair *ckp) Seed() ([]byte, error) { + return EncodeSeed(PrefixByteCurve, pair.seed[:]) +} + +// PublicKey will return the encoded public key. +func (pair *ckp) PublicKey() (string, error) { + var pub [curveKeyLen]byte + curve25519.ScalarBaseMult(&pub, &pair.seed) + key, err := Encode(PrefixByteCurve, pub[:]) + return string(key), err +} + +// PrivateKey will return the encoded private key. +func (pair *ckp) PrivateKey() ([]byte, error) { + return Encode(PrefixBytePrivate, pair.seed[:]) +} + +func decodePubCurveKey(src string, dest []byte) error { + var raw [curveDecodeLen]byte // should always be 35 + n, err := b32Enc.Decode(raw[:], []byte(src)) + if err != nil { + return err + } + if n != curveDecodeLen { + return ErrInvalidCurveKey + } + // Make sure it is what we expected. + if prefix := PrefixByte(raw[0]); prefix != PrefixByteCurve { + return ErrInvalidPublicKey + } + var crc uint16 + end := n - 2 + sum := raw[end:n] + checksum := bytes.NewReader(sum) + if err := binary.Read(checksum, binary.LittleEndian, &crc); err != nil { + return err + } + + // ensure checksum is valid + if err := validate(raw[:end], crc); err != nil { + return err + } + + // Copy over, ignore prefix byte. + copy(dest, raw[1:end]) + return nil +} + +// Only version for now, but could add in X3DH in the future, etc. +const XKeyVersionV1 = "xkv1" +const vlen = len(XKeyVersionV1) + +// Seal is compatible with nacl.Box.Seal() and can be used in similar situations for small messages. +// We generate the nonce from crypto rand by default. 
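+//
+// Encrypt/decrypt round-trip sketch (illustrative; errors elided):
+//
+//	alice, _ := CreateCurveKeys()
+//	bob, _ := CreateCurveKeys()
+//	bobPub, _ := bob.PublicKey()
+//	ct, _ := alice.Seal([]byte("hello"), bobPub)
+//	alicePub, _ := alice.PublicKey()
+//	pt, _ := bob.Open(ct, alicePub) // pt == []byte("hello")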
+func (pair *ckp) Seal(input []byte, recipient string) ([]byte, error) { + return pair.SealWithRand(input, recipient, rand.Reader) +} + +func (pair *ckp) SealWithRand(input []byte, recipient string, rr io.Reader) ([]byte, error) { + var ( + rpub [curveKeyLen]byte + nonce [curveNonceLen]byte + out [vlen + curveNonceLen]byte + err error + ) + + if err = decodePubCurveKey(recipient, rpub[:]); err != nil { + return nil, ErrInvalidRecipient + } + if _, err := io.ReadFull(rr, nonce[:]); err != nil { + return nil, err + } + copy(out[:vlen], []byte(XKeyVersionV1)) + copy(out[vlen:], nonce[:]) + return box.Seal(out[:], input, &nonce, &rpub, &pair.seed), nil +} + +func (pair *ckp) Open(input []byte, sender string) ([]byte, error) { + if len(input) <= vlen+curveNonceLen { + return nil, ErrInvalidEncrypted + } + var ( + spub [curveKeyLen]byte + nonce [curveNonceLen]byte + err error + ) + if !bytes.Equal(input[:vlen], []byte(XKeyVersionV1)) { + return nil, ErrInvalidEncVersion + } + copy(nonce[:], input[vlen:vlen+curveNonceLen]) + + if err = decodePubCurveKey(sender, spub[:]); err != nil { + return nil, ErrInvalidSender + } + + decrypted, ok := box.Open(nil, input[vlen+curveNonceLen:], &nonce, &spub, &pair.seed) + if !ok { + return nil, ErrCouldNotDecrypt + } + return decrypted, nil +} + +// Wipe will randomize the contents of the secret key +func (pair *ckp) Wipe() { + io.ReadFull(rand.Reader, pair.seed[:]) +} + +func (pair *ckp) Sign(_ []byte) ([]byte, error) { + return nil, ErrInvalidCurveKeyOperation +} + +func (pair *ckp) Verify(_ []byte, _ []byte) error { + return ErrInvalidCurveKeyOperation +} diff --git a/vendor/github.com/nats-io/nuid/.gitignore b/vendor/github.com/nats-io/nuid/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/vendor/github.com/nats-io/nuid/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/nats-io/nuid/.travis.yml b/vendor/github.com/nats-io/nuid/.travis.yml new file mode 100644 index 00000000..52be7265 --- /dev/null +++ b/vendor/github.com/nats-io/nuid/.travis.yml @@ -0,0 +1,17 @@ +language: go +sudo: false +go: +- 1.9.x +- 1.10.x + +install: +- go get -t ./... +- go get github.com/mattn/goveralls + +script: +- go fmt ./... +- go vet ./... +- go test -v +- go test -v --race +- go test -v -covermode=count -coverprofile=coverage.out +- $HOME/gopath/bin/goveralls -coverprofile coverage.out -service travis-ci diff --git a/vendor/github.com/nats-io/nuid/GOVERNANCE.md b/vendor/github.com/nats-io/nuid/GOVERNANCE.md new file mode 100644 index 00000000..01aee70d --- /dev/null +++ b/vendor/github.com/nats-io/nuid/GOVERNANCE.md @@ -0,0 +1,3 @@ +# NATS NUID Governance + +NATS NUID is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md). \ No newline at end of file diff --git a/vendor/github.com/nats-io/nuid/LICENSE b/vendor/github.com/nats-io/nuid/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/nats-io/nuid/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/nats-io/nuid/MAINTAINERS.md b/vendor/github.com/nats-io/nuid/MAINTAINERS.md new file mode 100644 index 00000000..6d0ed3e3 --- /dev/null +++ b/vendor/github.com/nats-io/nuid/MAINTAINERS.md @@ -0,0 +1,6 @@ +# Maintainers + +Maintainership is on a per project basis. + +### Core-maintainers + - Derek Collison [@derekcollison](https://github.com/derekcollison) \ No newline at end of file diff --git a/vendor/github.com/nats-io/nuid/README.md b/vendor/github.com/nats-io/nuid/README.md new file mode 100644 index 00000000..16e53948 --- /dev/null +++ b/vendor/github.com/nats-io/nuid/README.md @@ -0,0 +1,47 @@ +# NUID + +[![License Apache 2](https://img.shields.io/badge/License-Apache2-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0) +[![ReportCard](http://goreportcard.com/badge/nats-io/nuid)](http://goreportcard.com/report/nats-io/nuid) +[![Build Status](https://travis-ci.org/nats-io/nuid.svg?branch=master)](http://travis-ci.org/nats-io/nuid) +[![Release](https://img.shields.io/badge/release-v1.0.1-1eb0fc.svg)](https://github.com/nats-io/nuid/releases/tag/v1.0.1) +[![GoDoc](http://godoc.org/github.com/nats-io/nuid?status.png)](http://godoc.org/github.com/nats-io/nuid) +[![Coverage Status](https://coveralls.io/repos/github/nats-io/nuid/badge.svg?branch=master)](https://coveralls.io/github/nats-io/nuid?branch=master) + +A highly performant unique identifier generator. + +## Installation + +Use the `go` command: + + $ go get github.com/nats-io/nuid + +## Basic Usage +```go + +// Utilize the global locked instance +nuid := nuid.Next() + +// Create an instance, these are not locked. +n := nuid.New() +nuid = n.Next() + +// Generate a new crypto/rand seeded prefix. +// Generally not needed, happens automatically. +n.RandomizePrefix() +``` + +## Performance +NUID needs to be very fast to generate and be truly unique, all while being entropy pool friendly. +NUID uses 12 bytes of crypto generated data (entropy draining), and 10 bytes of pseudo-random +sequential data that increments with a pseudo-random increment. + +Total length of a NUID string is 22 bytes of base 62 ascii text, so 62^22 or +2707803647802660400290261537185326956544 possibilities. + +NUID can generate identifiers as fast as 60ns, or ~16 million per second. There is an associated +benchmark you can use to test performance on your own hardware. + +## License + +Unless otherwise noted, the NATS source files are distributed +under the Apache Version 2.0 license found in the LICENSE file. diff --git a/vendor/github.com/nats-io/nuid/nuid.go b/vendor/github.com/nats-io/nuid/nuid.go new file mode 100644 index 00000000..8134c764 --- /dev/null +++ b/vendor/github.com/nats-io/nuid/nuid.go @@ -0,0 +1,135 @@ +// Copyright 2016-2019 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A unique identifier generator that is high performance, very fast, and tries to be entropy pool friendly. 
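+//
+// Usage sketch (illustrative):
+//
+//	id := nuid.Next() // global, locked instance
+//	n := nuid.New()   // dedicated instance, not locked
+//	id = n.Next()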
+package nuid + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "sync" + "time" + + prand "math/rand" +) + +// NUID needs to be very fast to generate and truly unique, all while being entropy pool friendly. +// We will use 12 bytes of crypto generated data (entropy draining), and 10 bytes of sequential data +// that is started at a pseudo random number and increments with a pseudo-random increment. +// Total is 22 bytes of base 62 ascii text :) + +// Version of the library +const Version = "1.0.1" + +const ( + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + base = 62 + preLen = 12 + seqLen = 10 + maxSeq = int64(839299365868340224) // base^seqLen == 62^10 + minInc = int64(33) + maxInc = int64(333) + totalLen = preLen + seqLen +) + +type NUID struct { + pre []byte + seq int64 + inc int64 +} + +type lockedNUID struct { + sync.Mutex + *NUID +} + +// Global NUID +var globalNUID *lockedNUID + +// Seed sequential random with crypto or math/random and current time +// and generate crypto prefix. +func init() { + r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + prand.Seed(time.Now().UnixNano()) + } else { + prand.Seed(r.Int64()) + } + globalNUID = &lockedNUID{NUID: New()} + globalNUID.RandomizePrefix() +} + +// New will generate a new NUID and properly initialize the prefix, sequential start, and sequential increment. +func New() *NUID { + n := &NUID{ + seq: prand.Int63n(maxSeq), + inc: minInc + prand.Int63n(maxInc-minInc), + pre: make([]byte, preLen), + } + n.RandomizePrefix() + return n +} + +// Generate the next NUID string from the global locked NUID instance. +func Next() string { + globalNUID.Lock() + nuid := globalNUID.Next() + globalNUID.Unlock() + return nuid +} + +// Generate the next NUID string. +func (n *NUID) Next() string { + // Increment and capture. + n.seq += n.inc + if n.seq >= maxSeq { + n.RandomizePrefix() + n.resetSequential() + } + seq := n.seq + + // Copy prefix + var b [totalLen]byte + bs := b[:preLen] + copy(bs, n.pre) + + // copy in the seq in base62. + for i, l := len(b), seq; i > preLen; l /= base { + i -= 1 + b[i] = digits[l%base] + } + return string(b[:]) +} + +// Resets the sequential portion of the NUID. +func (n *NUID) resetSequential() { + n.seq = prand.Int63n(maxSeq) + n.inc = minInc + prand.Int63n(maxInc-minInc) +} + +// Generate a new prefix from crypto/rand. +// This call *can* drain entropy and will be called automatically when we exhaust the sequential range. +// Will panic if it gets an error from rand.Int() +func (n *NUID) RandomizePrefix() { + var cb [preLen]byte + cbs := cb[:] + if nb, err := rand.Read(cbs); nb != preLen || err != nil { + panic(fmt.Sprintf("nuid: failed generating crypto random number: %v\n", err)) + } + + for i := 0; i < preLen; i++ { + n.pre[i] = digits[int(cbs[i])%base] + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 00000000..d2e98d42 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,291 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and +// produces digests of any size between 1 and 64 bytes. 
+// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. 
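+//
+// Keyed (MAC) usage sketch; key and msg are placeholders for caller-supplied
+// values:
+//
+//	h, err := New(32, key) // key must be at most 64 bytes
+//	if err != nil {
+//		// invalid hash size or key length
+//	}
+//	h.Write(msg)
+//	tag := h.Sum(nil) // 32-byte tag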
+func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) + b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) 
+} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 00000000..199c21d2 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 00000000..9ae8206c --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,744 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
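+
+// Implementation note: the round macros below compute the BLAKE2b G function.
+// Rotations by 32 bits use VPSHUFD, rotations by 24 and 16 bits use VPSHUFB
+// with the c40/c48 byte-shuffle masks defined below, and the rotation by 63
+// bits is an add/shift/xor sequence.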
+ +//go:build amd64 && gc && !purego + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define 
VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + +// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; 
\ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + ADDQ $31, DX + ANDQ $~31, DX + + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(DX) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(DX) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(DX), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + 
LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) + ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + 
VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 6*8(SI), X15; \ + 
VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(R10), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(R10) + VMOVDQA X13, 32(R10) + VMOVDQA X14, 48(R10) + VMOVDQA X15, 64(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(R10) + VMOVDQA X13, 96(R10) + VMOVDQA X14, 112(R10) + VMOVDQA X15, 128(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(R10) + VMOVDQA X13, 160(R10) + VMOVDQA X14, 176(R10) + VMOVDQA X15, 192(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(R10) + VMOVDQA X13, 224(R10) + VMOVDQA X14, 240(R10) + VMOVDQA X15, 256(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 
15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 00000000..adfac00c --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,278 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
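+
+// This file provides hashBlocksSSE4, an SSE4.1 implementation of the BLAKE2b
+// compression function. The iv0..iv3 constants below hold the BLAKE2b
+// initialization vector, and c40/c48 are PSHUFB shuffle masks that rotate
+// each 64-bit lane right by 24 and 16 bits, the two byte-aligned rotations
+// of the G function.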
+ +//go:build amd64 && gc && !purego + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(R10), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + HALF_ROUND(X0, X1, 
X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, 
X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 00000000..3168a8aa --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "math/bits" +) + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
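+// Each row interleaves one sigma permutation for the round loop below:
+// entries 0-3 give the first message word of the four column-step G mixes,
+// entries 4-7 the second, and entries 8-15 do the same for the diagonal
+// step. BLAKE2b has 12 rounds but only 10 distinct sigma permutations, so
+// rows 10 and 11 repeat rows 0 and 1.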
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -32) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -24) + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -32) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -24) + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -32) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -24) + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -32) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -24) + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -16) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -63) + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -16) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -63) + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -16) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -63) + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -16) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -63) + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -32) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -24) + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -32) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -24) + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -32) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -24) + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -32) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -24) + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -16) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -63) + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -16) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -63) + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -16) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -63) + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -16) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -63) + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ 
v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 00000000..6e28668c --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go new file mode 100644 index 00000000..52c414db --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go @@ -0,0 +1,177 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "errors" + "io" +) + +// XOF defines the interface to hash functions that +// support arbitrary-length output. +type XOF interface { + // Write absorbs more data into the hash's state. It panics if called + // after Read. + io.Writer + + // Read reads more output from the hash. It returns io.EOF if the limit + // has been reached. + io.Reader + + // Clone returns a copy of the XOF in its current state. + Clone() XOF + + // Reset resets the XOF to its initial state. + Reset() +} + +// OutputLengthUnknown can be used as the size argument to NewXOF to indicate +// the length of the output is not known in advance. +const OutputLengthUnknown = 0 + +// magicUnknownOutputLength is a magic value for the output size that indicates +// an unknown number of output bytes. +const magicUnknownOutputLength = (1 << 32) - 1 + +// maxOutputLength is the absolute maximum number of bytes to produce when the +// number of output bytes is unknown. +const maxOutputLength = (1 << 32) * 64 + +// NewXOF creates a new variable-output-length hash. The hash either produces a +// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes +// (size == OutputLengthUnknown). In the latter case, an absolute limit of +// 256GiB applies. +// +// A non-nil key turns the hash into a MAC. The key must be between +// zero and 32 bytes long. +func NewXOF(size uint32, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^32-1 indicates an unknown number of bytes and thus isn't a + // valid length.
+ return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 00000000..54e446e1 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,30 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go new file mode 100644 index 00000000..a7828345 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -0,0 +1,71 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519” function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the “seed”. +// +// Beginning with Go 1.13, the functionality of this package was moved to the +// standard library as crypto/ed25519. This package only acts as a compatibility +// wrapper. +package ed25519 + +import ( + "crypto/ed25519" + "io" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of Ed25519 public keys. +// +// This type is an alias for crypto/ed25519's PublicKey type. +// See the crypto/ed25519 package for the methods on this type. +type PublicKey = ed25519.PublicKey + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +// +// This type is an alias for crypto/ed25519's PrivateKey type. +// See the crypto/ed25519 package for the methods on this type. +type PrivateKey = ed25519.PrivateKey + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + return ed25519.GenerateKey(rand) +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + return ed25519.NewKeyFromSeed(seed) +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + return ed25519.Sign(privateKey, message) +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + return ed25519.Verify(publicKey, message, sig) +} diff --git a/vendor/golang.org/x/crypto/nacl/box/box.go b/vendor/golang.org/x/crypto/nacl/box/box.go new file mode 100644 index 00000000..7f3b830e --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/box/box.go @@ -0,0 +1,182 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package box authenticates and encrypts small messages using public-key cryptography. + +Box uses Curve25519, XSalsa20 and Poly1305 to encrypt and authenticate +messages. The length of messages is not hidden. 
+ +It is the caller's responsibility to ensure the uniqueness of nonces—for +example, by using nonce 1 for the first message, nonce 2 for the second +message, etc. Nonces are long enough that randomly generated nonces have +negligible risk of collision. + +Messages should be small because: + +1. The whole message needs to be held in memory to be processed. + +2. Using large messages pressures implementations on small machines to decrypt +and process plaintext before authenticating it. This is very dangerous, and +this API does not allow it, but a protocol that uses excessive message sizes +might present some implementations with no other choice. + +3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. + +4. Performance may be improved by working with messages that fit into data caches. + +Thus large amounts of data should be chunked so that each message is small. +(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable +chunk size. + +This package is interoperable with NaCl: https://nacl.cr.yp.to/box.html. +Anonymous sealing/opening is an extension of NaCl defined by and interoperable +with libsodium: +https://libsodium.gitbook.io/doc/public-key_cryptography/sealed_boxes. +*/ +package box // import "golang.org/x/crypto/nacl/box" + +import ( + cryptorand "crypto/rand" + "io" + + "golang.org/x/crypto/blake2b" + "golang.org/x/crypto/curve25519" + "golang.org/x/crypto/nacl/secretbox" + "golang.org/x/crypto/salsa20/salsa" +) + +const ( + // Overhead is the number of bytes of overhead when boxing a message. + Overhead = secretbox.Overhead + + // AnonymousOverhead is the number of bytes of overhead when using anonymous + // sealed boxes. + AnonymousOverhead = Overhead + 32 +) + +// GenerateKey generates a new public/private key pair suitable for use with +// Seal and Open. +func GenerateKey(rand io.Reader) (publicKey, privateKey *[32]byte, err error) { + publicKey = new([32]byte) + privateKey = new([32]byte) + _, err = io.ReadFull(rand, privateKey[:]) + if err != nil { + publicKey = nil + privateKey = nil + return + } + + curve25519.ScalarBaseMult(publicKey, privateKey) + return +} + +var zeros [16]byte + +// Precompute calculates the shared key between peersPublicKey and privateKey +// and writes it to sharedKey. The shared key can be used with +// OpenAfterPrecomputation and SealAfterPrecomputation to speed up processing +// when using the same pair of keys repeatedly. +func Precompute(sharedKey, peersPublicKey, privateKey *[32]byte) { + curve25519.ScalarMult(sharedKey, privateKey, peersPublicKey) + salsa.HSalsa20(sharedKey, &zeros, sharedKey, &salsa.Sigma) +} + +// Seal appends an encrypted and authenticated copy of message to out, which +// will be Overhead bytes longer than the original and must not overlap it. The +// nonce must be unique for each distinct message for a given pair of keys. +func Seal(out, message []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) []byte { + var sharedKey [32]byte + Precompute(&sharedKey, peersPublicKey, privateKey) + return secretbox.Seal(out, message, nonce, &sharedKey) +} + +// SealAfterPrecomputation performs the same actions as Seal, but takes a +// shared key as generated by Precompute. +func SealAfterPrecomputation(out, message []byte, nonce *[24]byte, sharedKey *[32]byte) []byte { + return secretbox.Seal(out, message, nonce, sharedKey) +} + +// Open authenticates and decrypts a box produced by Seal and appends the +// message to out, which must not overlap box. 
The output will be Overhead +// bytes smaller than box. +func Open(out, box []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) ([]byte, bool) { + var sharedKey [32]byte + Precompute(&sharedKey, peersPublicKey, privateKey) + return secretbox.Open(out, box, nonce, &sharedKey) +} + +// OpenAfterPrecomputation performs the same actions as Open, but takes a +// shared key as generated by Precompute. +func OpenAfterPrecomputation(out, box []byte, nonce *[24]byte, sharedKey *[32]byte) ([]byte, bool) { + return secretbox.Open(out, box, nonce, sharedKey) +} + +// SealAnonymous appends an encrypted and authenticated copy of message to out, +// which will be AnonymousOverhead bytes longer than the original and must not +// overlap it. This differs from Seal in that the sender is not required to +// provide a private key. +func SealAnonymous(out, message []byte, recipient *[32]byte, rand io.Reader) ([]byte, error) { + if rand == nil { + rand = cryptorand.Reader + } + ephemeralPub, ephemeralPriv, err := GenerateKey(rand) + if err != nil { + return nil, err + } + + var nonce [24]byte + if err := sealNonce(ephemeralPub, recipient, &nonce); err != nil { + return nil, err + } + + if total := len(out) + AnonymousOverhead + len(message); cap(out) < total { + original := out + out = make([]byte, 0, total) + out = append(out, original...) + } + out = append(out, ephemeralPub[:]...) + + return Seal(out, message, &nonce, recipient, ephemeralPriv), nil +} + +// OpenAnonymous authenticates and decrypts a box produced by SealAnonymous and +// appends the message to out, which must not overlap box. The output will be +// AnonymousOverhead bytes smaller than box. +func OpenAnonymous(out, box []byte, publicKey, privateKey *[32]byte) (message []byte, ok bool) { + if len(box) < AnonymousOverhead { + return nil, false + } + + var ephemeralPub [32]byte + copy(ephemeralPub[:], box[:32]) + + var nonce [24]byte + if err := sealNonce(&ephemeralPub, publicKey, &nonce); err != nil { + return nil, false + } + + return Open(out, box[32:], &nonce, &ephemeralPub, privateKey) +} + +// sealNonce generates a 24 byte nonce that is a blake2b digest of the +// ephemeral public key and the receiver's public key. +func sealNonce(ephemeralPub, peersPublicKey *[32]byte, nonce *[24]byte) error { + h, err := blake2b.New(24, nil) + if err != nil { + return err + } + + if _, err = h.Write(ephemeralPub[:]); err != nil { + return err + } + + if _, err = h.Write(peersPublicKey[:]); err != nil { + return err + } + + h.Sum(nonce[:0]) + + return nil +} diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go new file mode 100644 index 00000000..f3c3242a --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go @@ -0,0 +1,173 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package secretbox encrypts and authenticates small messages. + +Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with +secret-key cryptography. The length of messages is not hidden. + +It is the caller's responsibility to ensure the uniqueness of nonces—for +example, by using nonce 1 for the first message, nonce 2 for the second +message, etc. Nonces are long enough that randomly generated nonces have +negligible risk of collision. + +Messages should be small because: + +1. The whole message needs to be held in memory to be processed. + +2. 
Using large messages pressures implementations on small machines to decrypt +and process plaintext before authenticating it. This is very dangerous, and +this API does not allow it, but a protocol that uses excessive message sizes +might present some implementations with no other choice. + +3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. + +4. Performance may be improved by working with messages that fit into data caches. + +Thus large amounts of data should be chunked so that each message is small. +(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable +chunk size. + +This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. +*/ +package secretbox // import "golang.org/x/crypto/nacl/secretbox" + +import ( + "golang.org/x/crypto/internal/alias" + "golang.org/x/crypto/internal/poly1305" + "golang.org/x/crypto/salsa20/salsa" +) + +// Overhead is the number of bytes of overhead when boxing a message. +const Overhead = poly1305.TagSize + +// setup produces a sub-key and Salsa20 counter given a nonce and key. +func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) { + // We use XSalsa20 for encryption so first we need to generate a + // key and nonce with HSalsa20. + var hNonce [16]byte + copy(hNonce[:], nonce[:]) + salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma) + + // The final 8 bytes of the original nonce form the new nonce. + copy(counter[:], nonce[16:]) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} + +// Seal appends an encrypted and authenticated copy of message to out, which +// must not overlap message. The key and nonce pair must be unique for each +// distinct message and the output will be Overhead bytes longer than message. +func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte { + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + + ret, out := sliceForAppend(out, len(message)+poly1305.TagSize) + if alias.AnyOverlap(out, message) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of message with the keystream generated from + // the first block. + firstMessageBlock := message + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + + tagOut := out + out = out[poly1305.TagSize:] + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + message = message[len(firstMessageBlock):] + ciphertext := out + out = out[len(firstMessageBlock):] + + // Now encrypt the rest. 
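+ // Keystream block 0 was consumed above: 32 bytes for the Poly1305 key and
+ // 32 bytes to mask the first message bytes. Advance the little-endian
+ // block counter in counter[8:] so the stream resumes at block 1.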
+ counter[8] = 1 + salsa.XORKeyStream(out, message, &counter, &subKey) + + var tag [poly1305.TagSize]byte + poly1305.Sum(&tag, ciphertext, &poly1305Key) + copy(tagOut, tag[:]) + + return ret +} + +// Open authenticates and decrypts a box produced by Seal and appends the +// message to out, which must not overlap box. The output will be Overhead +// bytes smaller than box. +func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) { + if len(box) < Overhead { + return nil, false + } + + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + var tag [poly1305.TagSize]byte + copy(tag[:], box) + + if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) { + return nil, false + } + + ret, out := sliceForAppend(out, len(box)-Overhead) + if alias.AnyOverlap(out, box) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of box with the keystream generated from + // the first block. + box = box[Overhead:] + firstMessageBlock := box + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + + box = box[len(firstMessageBlock):] + out = out[len(firstMessageBlock):] + + // Now decrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, box, &counter, &subKey) + + return ret, true +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go new file mode 100644 index 00000000..3fd05b27 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go @@ -0,0 +1,146 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package salsa provides low-level access to functions in the Salsa family. +package salsa // import "golang.org/x/crypto/salsa20/salsa" + +import "math/bits" + +// Sigma is the Salsa20 constant for 256-bit keys. +var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} + +// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte +// key k, and 16-byte constant c, and puts the result into the 32-byte array +// out. 
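+//
+// HSalsa20 is the key-derivation half of XSalsa20: secretbox's setup, for
+// example, feeds it the first 16 bytes of a 24-byte nonce to derive a fresh
+// Salsa20 subkey.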
+func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + for i := 0; i < 20; i += 2 { + u := x0 + x12 + x4 ^= bits.RotateLeft32(u, 7) + u = x4 + x0 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x4 + x12 ^= bits.RotateLeft32(u, 13) + u = x12 + x8 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x1 + x9 ^= bits.RotateLeft32(u, 7) + u = x9 + x5 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x9 + x1 ^= bits.RotateLeft32(u, 13) + u = x1 + x13 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x6 + x14 ^= bits.RotateLeft32(u, 7) + u = x14 + x10 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x14 + x6 ^= bits.RotateLeft32(u, 13) + u = x6 + x2 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x11 + x3 ^= bits.RotateLeft32(u, 7) + u = x3 + x15 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x3 + x11 ^= bits.RotateLeft32(u, 13) + u = x11 + x7 + x15 ^= bits.RotateLeft32(u, 18) + + u = x0 + x3 + x1 ^= bits.RotateLeft32(u, 7) + u = x1 + x0 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x1 + x3 ^= bits.RotateLeft32(u, 13) + u = x3 + x2 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x4 + x6 ^= bits.RotateLeft32(u, 7) + u = x6 + x5 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x6 + x4 ^= bits.RotateLeft32(u, 13) + u = x4 + x7 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x9 + x11 ^= bits.RotateLeft32(u, 7) + u = x11 + x10 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x11 + x9 ^= bits.RotateLeft32(u, 13) + u = x9 + x8 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x14 + x12 ^= bits.RotateLeft32(u, 7) + u = x12 + x15 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x12 + x14 ^= bits.RotateLeft32(u, 13) + u = x14 + x13 + x15 ^= bits.RotateLeft32(u, 18) + } + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x5) + out[5] = byte(x5 >> 8) + out[6] = byte(x5 >> 16) + out[7] = byte(x5 >> 24) + + out[8] = byte(x10) + out[9] = byte(x10 >> 8) + out[10] = byte(x10 >> 16) + out[11] = byte(x10 >> 24) + + out[12] = byte(x15) + out[13] = byte(x15 >> 8) + out[14] = byte(x15 >> 16) + out[15] = byte(x15 >> 24) + + out[16] = byte(x6) + out[17] = byte(x6 >> 8) + out[18] = byte(x6 >> 16) + out[19] = byte(x6 >> 24) + + out[20] = byte(x7) + out[21] = byte(x7 >> 8) + out[22] = byte(x7 >> 16) + out[23] = byte(x7 >> 24) + + 
out[24] = byte(x8) + out[25] = byte(x8 >> 8) + out[26] = byte(x8 >> 16) + out[27] = byte(x8 >> 24) + + out[28] = byte(x9) + out[29] = byte(x9 >> 8) + out[30] = byte(x9 >> 16) + out[31] = byte(x9 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go new file mode 100644 index 00000000..7ec7bb39 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go @@ -0,0 +1,201 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +import "math/bits" + +// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts +// the result into the 64-byte array out. The input and output may be the same array. +func Core208(out *[64]byte, in *[64]byte) { + j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 + j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 + j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 + j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 + j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 + j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 + j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 + j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 + j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 + j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 + j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 + j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < 8; i += 2 { + u := x0 + x12 + x4 ^= bits.RotateLeft32(u, 7) + u = x4 + x0 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x4 + x12 ^= bits.RotateLeft32(u, 13) + u = x12 + x8 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x1 + x9 ^= bits.RotateLeft32(u, 7) + u = x9 + x5 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x9 + x1 ^= bits.RotateLeft32(u, 13) + u = x1 + x13 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x6 + x14 ^= bits.RotateLeft32(u, 7) + u = x14 + x10 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x14 + x6 ^= bits.RotateLeft32(u, 13) + u = x6 + x2 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x11 + x3 ^= bits.RotateLeft32(u, 7) + u = x3 + x15 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x3 + x11 ^= bits.RotateLeft32(u, 13) + u = x11 + x7 + x15 ^= bits.RotateLeft32(u, 18) + + u = x0 + x3 + x1 ^= bits.RotateLeft32(u, 7) + u = x1 + x0 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x1 + x3 ^= bits.RotateLeft32(u, 13) + u = x3 + x2 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x4 + x6 ^= bits.RotateLeft32(u, 7) + u = x6 + x5 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x6 + x4 ^= bits.RotateLeft32(u, 13) + u = x4 + x7 + x5 ^= 
bits.RotateLeft32(u, 18) + + u = x10 + x9 + x11 ^= bits.RotateLeft32(u, 7) + u = x11 + x10 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x11 + x9 ^= bits.RotateLeft32(u, 13) + u = x9 + x8 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x14 + x12 ^= bits.RotateLeft32(u, 7) + u = x12 + x15 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x12 + x14 ^= bits.RotateLeft32(u, 13) + u = x14 + x13 + x15 ^= bits.RotateLeft32(u, 18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go new file mode 100644 index 00000000..e76b44fe --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc + +package salsa + +//go:noescape + +// salsa2020XORKeyStream is implemented in salsa20_amd64.s. +func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). 
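+// In the XSalsa20 construction used by secretbox, counter[0:8] holds the
+// final 8 bytes of the 24-byte nonce and counter[8:16] the little-endian
+// block counter.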
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + if len(in) == 0 { + return + } + _ = out[len(in)-1] + salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s new file mode 100644 index 00000000..fcce0234 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s @@ -0,0 +1,880 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) +// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size. +TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment + MOVQ out+0(FP),DI + MOVQ in+8(FP),SI + MOVQ n+16(FP),DX + MOVQ nonce+24(FP),CX + MOVQ key+32(FP),R8 + + MOVQ SP,R12 + ADDQ $31, R12 + ANDQ $~31, R12 + + MOVQ DX,R9 + MOVQ CX,DX + MOVQ R8,R10 + CMPQ R9,$0 + JBE DONE + START: + MOVL 20(R10),CX + MOVL 0(R10),R8 + MOVL 0(DX),AX + MOVL 16(R10),R11 + MOVL CX,0(R12) + MOVL R8, 4 (R12) + MOVL AX, 8 (R12) + MOVL R11, 12 (R12) + MOVL 8(DX),CX + MOVL 24(R10),R8 + MOVL 4(R10),AX + MOVL 4(DX),R11 + MOVL CX,16(R12) + MOVL R8, 20 (R12) + MOVL AX, 24 (R12) + MOVL R11, 28 (R12) + MOVL 12(DX),CX + MOVL 12(R10),DX + MOVL 28(R10),R8 + MOVL 8(R10),AX + MOVL DX,32(R12) + MOVL CX, 36 (R12) + MOVL R8, 40 (R12) + MOVL AX, 44 (R12) + MOVQ $1634760805,DX + MOVQ $857760878,CX + MOVQ $2036477234,R8 + MOVQ $1797285236,AX + MOVL DX,48(R12) + MOVL CX, 52 (R12) + MOVL R8, 56 (R12) + MOVL AX, 60 (R12) + CMPQ R9,$256 + JB BYTESBETWEEN1AND255 + MOVOA 48(R12),X0 + PSHUFL $0X55,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X3 + PSHUFL $0X00,X0,X0 + MOVOA X1,64(R12) + MOVOA X2,80(R12) + MOVOA X3,96(R12) + MOVOA X0,112(R12) + MOVOA 0(R12),X0 + PSHUFL $0XAA,X0,X1 + PSHUFL $0XFF,X0,X2 + PSHUFL $0X00,X0,X3 + PSHUFL $0X55,X0,X0 + MOVOA X1,128(R12) + MOVOA X2,144(R12) + MOVOA X3,160(R12) + MOVOA X0,176(R12) + MOVOA 16(R12),X0 + PSHUFL $0XFF,X0,X1 + PSHUFL $0X55,X0,X2 + PSHUFL $0XAA,X0,X0 + MOVOA X1,192(R12) + MOVOA X2,208(R12) + MOVOA X0,224(R12) + MOVOA 32(R12),X0 + PSHUFL $0X00,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X0 + MOVOA X1,240(R12) + MOVOA X2,256(R12) + MOVOA X0,272(R12) + BYTESATLEAST256: + MOVL 16(R12),DX + MOVL 36 (R12),CX + MOVL DX,288(R12) + MOVL CX,304(R12) + SHLQ $32,CX + ADDQ CX,DX + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 292 (R12) + MOVL CX, 308 (R12) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 296 (R12) + MOVL CX, 312 (R12) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 300 (R12) + MOVL CX, 316 (R12) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX,16(R12) + MOVL CX, 36 (R12) + MOVQ R9,352(R12) + MOVQ $20,DX + MOVOA 64(R12),X0 + MOVOA 80(R12),X1 + MOVOA 96(R12),X2 + MOVOA 256(R12),X3 + MOVOA 272(R12),X4 + MOVOA 128(R12),X5 + MOVOA 144(R12),X6 + MOVOA 176(R12),X7 + MOVOA 192(R12),X8 + MOVOA 208(R12),X9 + MOVOA 224(R12),X10 + MOVOA 304(R12),X11 + MOVOA 112(R12),X12 + MOVOA 160(R12),X13 + MOVOA 240(R12),X14 + MOVOA 288(R12),X15 + MAINLOOP1: + MOVOA X1,320(R12) + MOVOA X2,336(R12) + MOVOA X13,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X14 + PSRLL $25,X2 + PXOR X2,X14 + MOVOA X7,X1 + PADDL 
X0,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X11 + PSRLL $25,X2 + PXOR X2,X11 + MOVOA X12,X1 + PADDL X14,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X15 + PSRLL $23,X2 + PXOR X2,X15 + MOVOA X0,X1 + PADDL X11,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X9 + PSRLL $23,X2 + PXOR X2,X9 + MOVOA X14,X1 + PADDL X15,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X13 + PSRLL $19,X2 + PXOR X2,X13 + MOVOA X11,X1 + PADDL X9,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X7 + PSRLL $19,X2 + PXOR X2,X7 + MOVOA X15,X1 + PADDL X13,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA 320(R12),X1 + MOVOA X12,320(R12) + MOVOA X9,X2 + PADDL X7,X2 + MOVOA X2,X12 + PSLLL $18,X2 + PXOR X2,X0 + PSRLL $14,X12 + PXOR X12,X0 + MOVOA X5,X2 + PADDL X1,X2 + MOVOA X2,X12 + PSLLL $7,X2 + PXOR X2,X3 + PSRLL $25,X12 + PXOR X12,X3 + MOVOA 336(R12),X2 + MOVOA X0,336(R12) + MOVOA X6,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X4 + PSRLL $25,X12 + PXOR X12,X4 + MOVOA X1,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X10 + PSRLL $23,X12 + PXOR X12,X10 + MOVOA X2,X0 + PADDL X4,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X8 + PSRLL $23,X12 + PXOR X12,X8 + MOVOA X3,X0 + PADDL X10,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X5 + PSRLL $19,X12 + PXOR X12,X5 + MOVOA X4,X0 + PADDL X8,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X6 + PSRLL $19,X12 + PXOR X12,X6 + MOVOA X10,X0 + PADDL X5,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA 320(R12),X0 + MOVOA X1,320(R12) + MOVOA X4,X1 + PADDL X0,X1 + MOVOA X1,X12 + PSLLL $7,X1 + PXOR X1,X7 + PSRLL $25,X12 + PXOR X12,X7 + MOVOA X8,X1 + PADDL X6,X1 + MOVOA X1,X12 + PSLLL $18,X1 + PXOR X1,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 336(R12),X12 + MOVOA X2,336(R12) + MOVOA X14,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X5 + PSRLL $25,X2 + PXOR X2,X5 + MOVOA X0,X1 + PADDL X7,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X10 + PSRLL $23,X2 + PXOR X2,X10 + MOVOA X12,X1 + PADDL X5,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X8 + PSRLL $23,X2 + PXOR X2,X8 + MOVOA X7,X1 + PADDL X10,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X4 + PSRLL $19,X2 + PXOR X2,X4 + MOVOA X5,X1 + PADDL X8,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X14 + PSRLL $19,X2 + PXOR X2,X14 + MOVOA X10,X1 + PADDL X4,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X0 + PSRLL $14,X2 + PXOR X2,X0 + MOVOA 320(R12),X1 + MOVOA X0,320(R12) + MOVOA X8,X0 + PADDL X14,X0 + MOVOA X0,X2 + PSLLL $18,X0 + PXOR X0,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA X11,X0 + PADDL X1,X0 + MOVOA X0,X2 + PSLLL $7,X0 + PXOR X0,X6 + PSRLL $25,X2 + PXOR X2,X6 + MOVOA 336(R12),X2 + MOVOA X12,336(R12) + MOVOA X3,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X13 + PSRLL $25,X12 + PXOR X12,X13 + MOVOA X1,X0 + PADDL X6,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X15 + PSRLL $23,X12 + PXOR X12,X15 + MOVOA X2,X0 + PADDL X13,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X9 + PSRLL $23,X12 + PXOR X12,X9 + MOVOA X6,X0 + PADDL X15,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X11 + PSRLL $19,X12 + PXOR X12,X11 + MOVOA X13,X0 + PADDL X9,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X3 + PSRLL $19,X12 + PXOR X12,X3 + MOVOA X15,X0 + PADDL X11,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA X9,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 320(R12),X12 + MOVOA 336(R12),X0 + SUBQ $2,DX + JA MAINLOOP1 + PADDL 112(R12),X12 + PADDL 176(R12),X7 + PADDL 224(R12),X10 + PADDL 272(R12),X4 + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL 
$0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 0(SI),DX + XORL 4(SI),CX + XORL 8(SI),R8 + XORL 12(SI),R9 + MOVL DX,0(DI) + MOVL CX,4(DI) + MOVL R8,8(DI) + MOVL R9,12(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 64(SI),DX + XORL 68(SI),CX + XORL 72(SI),R8 + XORL 76(SI),R9 + MOVL DX,64(DI) + MOVL CX,68(DI) + MOVL R8,72(DI) + MOVL R9,76(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 128(SI),DX + XORL 132(SI),CX + XORL 136(SI),R8 + XORL 140(SI),R9 + MOVL DX,128(DI) + MOVL CX,132(DI) + MOVL R8,136(DI) + MOVL R9,140(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + XORL 192(SI),DX + XORL 196(SI),CX + XORL 200(SI),R8 + XORL 204(SI),R9 + MOVL DX,192(DI) + MOVL CX,196(DI) + MOVL R8,200(DI) + MOVL R9,204(DI) + PADDL 240(R12),X14 + PADDL 64(R12),X0 + PADDL 128(R12),X5 + PADDL 192(R12),X8 + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 16(SI),DX + XORL 20(SI),CX + XORL 24(SI),R8 + XORL 28(SI),R9 + MOVL DX,16(DI) + MOVL CX,20(DI) + MOVL R8,24(DI) + MOVL R9,28(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 80(SI),DX + XORL 84(SI),CX + XORL 88(SI),R8 + XORL 92(SI),R9 + MOVL DX,80(DI) + MOVL CX,84(DI) + MOVL R8,88(DI) + MOVL R9,92(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 144(SI),DX + XORL 148(SI),CX + XORL 152(SI),R8 + XORL 156(SI),R9 + MOVL DX,144(DI) + MOVL CX,148(DI) + MOVL R8,152(DI) + MOVL R9,156(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + XORL 208(SI),DX + XORL 212(SI),CX + XORL 216(SI),R8 + XORL 220(SI),R9 + MOVL DX,208(DI) + MOVL CX,212(DI) + MOVL R8,216(DI) + MOVL R9,220(DI) + PADDL 288(R12),X15 + PADDL 304(R12),X11 + PADDL 80(R12),X1 + PADDL 144(R12),X6 + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 32(SI),DX + XORL 36(SI),CX + XORL 40(SI),R8 + XORL 44(SI),R9 + MOVL DX,32(DI) + MOVL CX,36(DI) + MOVL R8,40(DI) + MOVL R9,44(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 96(SI),DX + XORL 100(SI),CX + XORL 104(SI),R8 + XORL 108(SI),R9 + MOVL DX,96(DI) + MOVL CX,100(DI) + MOVL R8,104(DI) + MOVL R9,108(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 160(SI),DX + XORL 164(SI),CX + XORL 168(SI),R8 + XORL 172(SI),R9 + MOVL DX,160(DI) + MOVL CX,164(DI) + MOVL R8,168(DI) + MOVL R9,172(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + XORL 224(SI),DX + XORL 228(SI),CX + XORL 232(SI),R8 + XORL 236(SI),R9 + MOVL DX,224(DI) + MOVL CX,228(DI) + MOVL R8,232(DI) + MOVL R9,236(DI) + PADDL 160(R12),X13 + PADDL 208(R12),X9 + PADDL 256(R12),X3 + PADDL 96(R12),X2 + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 48(SI),DX + XORL 52(SI),CX + XORL 56(SI),R8 + XORL 60(SI),R9 + MOVL DX,48(DI) + MOVL CX,52(DI) + MOVL R8,56(DI) + MOVL 
R9,60(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 112(SI),DX + XORL 116(SI),CX + XORL 120(SI),R8 + XORL 124(SI),R9 + MOVL DX,112(DI) + MOVL CX,116(DI) + MOVL R8,120(DI) + MOVL R9,124(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 176(SI),DX + XORL 180(SI),CX + XORL 184(SI),R8 + XORL 188(SI),R9 + MOVL DX,176(DI) + MOVL CX,180(DI) + MOVL R8,184(DI) + MOVL R9,188(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + XORL 240(SI),DX + XORL 244(SI),CX + XORL 248(SI),R8 + XORL 252(SI),R9 + MOVL DX,240(DI) + MOVL CX,244(DI) + MOVL R8,248(DI) + MOVL R9,252(DI) + MOVQ 352(R12),R9 + SUBQ $256,R9 + ADDQ $256,SI + ADDQ $256,DI + CMPQ R9,$256 + JAE BYTESATLEAST256 + CMPQ R9,$0 + JBE DONE + BYTESBETWEEN1AND255: + CMPQ R9,$64 + JAE NOCOPY + MOVQ DI,DX + LEAQ 360(R12),DI + MOVQ R9,CX + REP; MOVSB + LEAQ 360(R12),DI + LEAQ 360(R12),SI + NOCOPY: + MOVQ R9,352(R12) + MOVOA 48(R12),X0 + MOVOA 0(R12),X1 + MOVOA 16(R12),X2 + MOVOA 32(R12),X3 + MOVOA X1,X4 + MOVQ $20,CX + MAINLOOP2: + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + SUBQ $4,CX + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PXOR X7,X7 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + JA MAINLOOP2 + PADDL 48(R12),X0 + PADDL 0(R12),X1 + PADDL 16(R12),X2 + PADDL 32(R12),X3 + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 0(SI),CX + XORL 48(SI),R8 + XORL 32(SI),R9 + XORL 16(SI),AX + MOVL CX,0(DI) + MOVL R8,48(DI) + MOVL R9,32(DI) + MOVL AX,16(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 20(SI),CX + XORL 
4(SI),R8 + XORL 52(SI),R9 + XORL 36(SI),AX + MOVL CX,20(DI) + MOVL R8,4(DI) + MOVL R9,52(DI) + MOVL AX,36(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 40(SI),CX + XORL 24(SI),R8 + XORL 8(SI),R9 + XORL 56(SI),AX + MOVL CX,40(DI) + MOVL R8,24(DI) + MOVL R9,8(DI) + MOVL AX,56(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + XORL 60(SI),CX + XORL 44(SI),R8 + XORL 28(SI),R9 + XORL 12(SI),AX + MOVL CX,60(DI) + MOVL R8,44(DI) + MOVL R9,28(DI) + MOVL AX,12(DI) + MOVQ 352(R12),R9 + MOVL 16(R12),CX + MOVL 36 (R12),R8 + ADDQ $1,CX + SHLQ $32,R8 + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $32,R8 + MOVL CX,16(R12) + MOVL R8, 36 (R12) + CMPQ R9,$64 + JA BYTESATLEAST65 + JAE BYTESATLEAST64 + MOVQ DI,SI + MOVQ DX,DI + MOVQ R9,CX + REP; MOVSB + BYTESATLEAST64: + DONE: + RET + BYTESATLEAST65: + SUBQ $64,R9 + ADDQ $64,DI + ADDQ $64,SI + JMP BYTESBETWEEN1AND255 diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go new file mode 100644 index 00000000..9448760f --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go @@ -0,0 +1,14 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package salsa + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + genericXORKeyStream(out, in, counter, key) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go new file mode 100644 index 00000000..e5cdb9a2 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go @@ -0,0 +1,233 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +import "math/bits" + +const rounds = 20 + +// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, +// and 16-byte constant c, and puts the result into 64-byte array out. 
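(Reading aid, not part of the patch: the reference implementation that follows inlines every add-rotate-xor step of the twenty Salsa20 rounds. Below is a minimal sketch of the quarter-round the loop body repeats; quarterRound is my name, not the vendored code's, which keeps everything inline so the state can stay in registers.)

	package salsasketch

	import "math/bits"

	// quarterRound mirrors one column (or row) step of the vendored core():
	// each word is XORed with the rotated sum of the two words updated
	// before it, using the Salsa20 rotation distances 7, 9, 13 and 18.
	func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
		b ^= bits.RotateLeft32(a+d, 7)
		c ^= bits.RotateLeft32(b+a, 9)
		d ^= bits.RotateLeft32(c+b, 13)
		a ^= bits.RotateLeft32(d+c, 18)
		return a, b, c, d
	}

(Each iteration of the "for i := 0; i < rounds; i += 2" loop below is one double round: this quarter-round applied down the four columns of the 4x4 state, then across the four rows, which is why the counter advances by two.)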
+func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < rounds; i += 2 { + u := x0 + x12 + x4 ^= bits.RotateLeft32(u, 7) + u = x4 + x0 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x4 + x12 ^= bits.RotateLeft32(u, 13) + u = x12 + x8 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x1 + x9 ^= bits.RotateLeft32(u, 7) + u = x9 + x5 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x9 + x1 ^= bits.RotateLeft32(u, 13) + u = x1 + x13 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x6 + x14 ^= bits.RotateLeft32(u, 7) + u = x14 + x10 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x14 + x6 ^= bits.RotateLeft32(u, 13) + u = x6 + x2 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x11 + x3 ^= bits.RotateLeft32(u, 7) + u = x3 + x15 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x3 + x11 ^= bits.RotateLeft32(u, 13) + u = x11 + x7 + x15 ^= bits.RotateLeft32(u, 18) + + u = x0 + x3 + x1 ^= bits.RotateLeft32(u, 7) + u = x1 + x0 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x1 + x3 ^= bits.RotateLeft32(u, 13) + u = x3 + x2 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x4 + x6 ^= bits.RotateLeft32(u, 7) + u = x6 + x5 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x6 + x4 ^= bits.RotateLeft32(u, 13) + u = x4 + x7 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x9 + x11 ^= bits.RotateLeft32(u, 7) + u = x11 + x10 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x11 + x9 ^= bits.RotateLeft32(u, 13) + u = x9 + x8 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x14 + x12 ^= bits.RotateLeft32(u, 7) + u = x12 + x15 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x12 + x14 ^= bits.RotateLeft32(u, 13) + u = x14 + x13 + x15 ^= bits.RotateLeft32(u, 18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] 
= byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} + +// genericXORKeyStream is the generic implementation of XORKeyStream to be used +// when no assembly implementation is available. +func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + var block [64]byte + var counterCopy [16]byte + copy(counterCopy[:], counter[:]) + + for len(in) >= 64 { + core(&block, &counterCopy, key, &Sigma) + for i, x := range block { + out[i] = in[i] ^ x + } + u := uint32(1) + for i := 8; i < 16; i++ { + u += uint32(counterCopy[i]) + counterCopy[i] = byte(u) + u >>= 8 + } + in = in[64:] + out = out[64:] + } + + if len(in) > 0 { + core(&block, &counterCopy, key, &Sigma) + for i, v := range in { + out[i] = v ^ block[i] + } + } +} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s index 8fb26aeb..1f539388 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -319,9 +319,9 @@ MOVQ rDi, _si(oState); \ MOVQ rDo, _so(oState) \ -// func keccakF1600(state *[25]uint64) +// func keccakF1600(a *[25]uint64) TEXT ·keccakF1600(SB), 0, $200-8 - MOVQ state+0(FP), rpState + MOVQ a+0(FP), rpState // Convert the user state into an internal state NOTQ _be(rpState) diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go index c0834c00..cc0bb7ab 100644 --- a/vendor/golang.org/x/crypto/ssh/channel.go +++ b/vendor/golang.org/x/crypto/ssh/channel.go @@ -187,9 +187,11 @@ type channel struct { pending *buffer extPending *buffer - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 + // windowMu protects myWindow, the flow-control window, and myConsumed, + // the number of bytes consumed since we last increased myWindow + windowMu sync.Mutex + myWindow uint32 + myConsumed uint32 // writeMu serializes calls to mux.conn.writePacket() and // protects sentClose and packetPool. 
This mutex must be @@ -332,14 +334,24 @@ func (ch *channel) handleData(packet []byte) error { return nil } -func (c *channel) adjustWindow(n uint32) error { +func (c *channel) adjustWindow(adj uint32) error { c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. - c.myWindow += uint32(n) + // Since myConsumed and myWindow are managed on our side, and can never + // exceed the initial window setting, we don't worry about overflow. + c.myConsumed += adj + var sendAdj uint32 + if (channelWindowSize-c.myWindow > 3*c.maxIncomingPayload) || + (c.myWindow < channelWindowSize/2) { + sendAdj = c.myConsumed + c.myConsumed = 0 + c.myWindow += sendAdj + } c.windowMu.Unlock() + if sendAdj == 0 { + return nil + } return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), + AdditionalBytes: sendAdj, }) } diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go index bdc356cb..fd8c4974 100644 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -82,7 +82,7 @@ func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan if err := conn.clientHandshake(addr, &fullConf); err != nil { c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) + return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %w", err) } conn.mux = newMux(conn.transport) return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 49bbba76..56cdc7c2 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -35,6 +35,16 @@ type keyingTransport interface { // direction will be effected if a msgNewKeys message is sent // or received. prepareKeyChange(*algorithms, *kexResult) error + + // setStrictMode sets the strict KEX mode, notably triggering + // sequence number resets on sending or receiving msgNewKeys. + // If the sequence number is already > 1 when setStrictMode + // is called, an error is returned. + setStrictMode() error + + // setInitialKEXDone indicates to the transport that the initial key exchange + // was completed + setInitialKEXDone() } // handshakeTransport implements rekeying on top of a keyingTransport @@ -100,6 +110,10 @@ type handshakeTransport struct { // The session ID or nil if first kex did not complete yet. sessionID []byte + + // strictMode indicates if the other side of the handshake indicated + // that we should be following the strict KEX protocol restrictions. + strictMode bool } type pendingKex struct { @@ -209,7 +223,10 @@ func (t *handshakeTransport) readLoop() { close(t.incoming) break } - if p[0] == msgIgnore || p[0] == msgDebug { + // If this is the first kex, and strict KEX mode is enabled, + // we don't ignore any messages, as they may be used to manipulate + // the packet sequence numbers. + if !(t.sessionID == nil && t.strictMode) && (p[0] == msgIgnore || p[0] == msgDebug) { continue } t.incoming <- p @@ -441,6 +458,11 @@ func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { return successPacket, nil } +const ( + kexStrictClient = "kex-strict-c-v00@openssh.com" + kexStrictServer = "kex-strict-s-v00@openssh.com" +) + // sendKexInit sends a key change message. 
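(Context for the sendKexInit changes below: strict KEX, the hardening against Terrapin-style prefix manipulation, is negotiated through the two pseudo-algorithm names defined above rather than a new message type. Each side appends its marker to the KEXINIT algorithm list only during the first exchange, and enterKeyExchange later enables the mode when the peer's marker is present. A standalone sketch of that check, assuming a linear-scan contains helper like the one the vendored code calls:)

	// strictKEXNegotiated is a hypothetical standalone form of the
	// condition enterKeyExchange evaluates; firstKex corresponds to
	// t.sessionID == nil in the vendored handshake.go.
	func strictKEXNegotiated(isClient, firstKex bool, peerKexAlgos []string) bool {
		if !firstKex {
			return false // markers are only honored in the initial key exchange
		}
		if isClient {
			return contains(peerKexAlgos, "kex-strict-s-v00@openssh.com")
		}
		return contains(peerKexAlgos, "kex-strict-c-v00@openssh.com")
	}

	func contains(list []string, x string) bool {
		for _, s := range list {
			if s == x {
				return true
			}
		}
		return false
	}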
func (t *handshakeTransport) sendKexInit() error { t.mu.Lock() @@ -454,7 +476,6 @@ func (t *handshakeTransport) sendKexInit() error { } msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, CiphersClientServer: t.config.Ciphers, CiphersServerClient: t.config.Ciphers, MACsClientServer: t.config.MACs, @@ -464,6 +485,13 @@ func (t *handshakeTransport) sendKexInit() error { } io.ReadFull(rand.Reader, msg.Cookie[:]) + // We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm, + // and possibly to add the ext-info extension algorithm. Since the slice may be the + // user owned KeyExchanges, we create our own slice in order to avoid using user + // owned memory by mistake. + msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+2) // room for kex-strict and ext-info + msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) + isServer := len(t.hostKeys) > 0 if isServer { for _, k := range t.hostKeys { @@ -488,17 +516,24 @@ func (t *handshakeTransport) sendKexInit() error { msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) } } + + if t.sessionID == nil { + msg.KexAlgos = append(msg.KexAlgos, kexStrictServer) + } } else { msg.ServerHostKeyAlgos = t.hostKeyAlgorithms // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what // algorithms the server supports for public key authentication. See RFC // 8308, Section 2.1. + // + // We also send the strict KEX mode extension algorithm, in order to opt + // into the strict KEX mode. if firstKeyExchange := t.sessionID == nil; firstKeyExchange { - msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1) - msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") + msg.KexAlgos = append(msg.KexAlgos, kexStrictClient) } + } packet := Marshal(msg) @@ -604,6 +639,13 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { return err } + if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) { + t.strictMode = true + if err := t.conn.setStrictMode(); err != nil { + return err + } + } + // We don't send FirstKexFollows, but we handle receiving it. // // RFC 4253 section 7 defines the kex and the agreement method for @@ -679,6 +721,12 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { return unexpectedMessageError(msgNewKeys, packet[0]) } + if firstKeyExchange { + // Indicates to the transport that the first key exchange is completed + // after receiving SSH_MSG_NEWKEYS. 
+ t.conn.setInitialKEXDone() + } + return nil } diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 7f0c236a..c2dfe326 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -213,6 +213,7 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha } else { for _, algo := range fullConf.PublicKeyAuthAlgorithms { if !contains(supportedPubKeyAuthAlgos, algo) { + c.Close() return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo) } } @@ -220,6 +221,7 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha // Check if the config contains any unsupported key exchanges for _, kex := range fullConf.KeyExchanges { if _, ok := serverForbiddenKexAlgos[kex]; ok { + c.Close() return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) } } diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go index da015801..0424d2d3 100644 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -49,6 +49,9 @@ type transport struct { rand io.Reader isClient bool io.Closer + + strictMode bool + initialKEXDone bool } // packetCipher represents a combination of SSH encryption/MAC @@ -74,6 +77,18 @@ type connectionState struct { pendingKeyChange chan packetCipher } +func (t *transport) setStrictMode() error { + if t.reader.seqNum != 1 { + return errors.New("ssh: sequence number != 1 when strict KEX mode requested") + } + t.strictMode = true + return nil +} + +func (t *transport) setInitialKEXDone() { + t.initialKEXDone = true +} + // prepareKeyChange sets up key material for a keychange. The key changes in // both directions are triggered by reading and writing a msgNewKey packet // respectively. @@ -112,11 +127,12 @@ func (t *transport) printPacket(p []byte, write bool) { // Read and decrypt next packet. 
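(The transport.go changes below thread strictMode down into connectionState. Note that setStrictMode refuses to engage unless the reader's sequence number is still 1, i.e. nothing but the peer's KEXINIT has been received. The observable effect: under strict KEX, both the read and write sides reset their packet sequence number to zero whenever msgNewKeys installs a fresh cipher, binding the counters to the key epoch. A minimal sketch of the reader-side bookkeeping, with a plain flag standing in for the pendingKeyChange channel the real code drains:)

	// onPacket is a hypothetical distillation of the counter handling in
	// connectionState.readPacket; it is not the vendored API.
	type seqState struct{ seqNum uint32 }

	func (s *seqState) onPacket(isNewKeys, strictMode bool) {
		s.seqNum++ // every packet advances the counter
		if isNewKeys && strictMode {
			// Restarting at zero on each key change means any packet an
			// attacker injected or stripped before the handshake finished
			// leaves the two ends' counters out of step, so the next
			// MAC/AEAD check fails instead of silently accepting a
			// manipulated prefix.
			s.seqNum = 0
		}
	}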
func (t *transport) readPacket() (p []byte, err error) { for { - p, err = t.reader.readPacket(t.bufReader) + p, err = t.reader.readPacket(t.bufReader, t.strictMode) if err != nil { break } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { + // in strict mode we pass through DEBUG and IGNORE packets only during the initial KEX + if len(p) == 0 || (t.strictMode && !t.initialKEXDone) || (p[0] != msgIgnore && p[0] != msgDebug) { break } } @@ -127,7 +143,7 @@ func (t *transport) readPacket() (p []byte, err error) { return p, err } -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { +func (s *connectionState) readPacket(r *bufio.Reader, strictMode bool) ([]byte, error) { packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) s.seqNum++ if err == nil && len(packet) == 0 { @@ -140,6 +156,9 @@ func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { select { case cipher := <-s.pendingKeyChange: s.packetCipher = cipher + if strictMode { + s.seqNum = 0 + } default: return nil, errors.New("ssh: got bogus newkeys message") } @@ -170,10 +189,10 @@ func (t *transport) writePacket(packet []byte) error { if debugTransport { t.printPacket(packet, true) } - return t.writer.writePacket(t.bufWriter, t.rand, packet) + return t.writer.writePacket(t.bufWriter, t.rand, packet, t.strictMode) } -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { +func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte, strictMode bool) error { changeKeys := len(packet) > 0 && packet[0] == msgNewKeys err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) @@ -188,6 +207,9 @@ func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet [] select { case cipher := <-s.pendingKeyChange: s.packetCipher = cipher + if strictMode { + s.seqNum = 0 + } default: panic("ssh: no key material for msgNewKeys") } diff --git a/vendor/modules.txt b/vendor/modules.txt index 4bb056a8..54d67cec 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -60,6 +60,25 @@ github.com/bramvdbogaerde/go-scp # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 +# github.com/cloudevents/sdk-go/protocol/nats/v2 v2.15.2 +## explicit; go 1.18 +github.com/cloudevents/sdk-go/protocol/nats/v2 +# github.com/cloudevents/sdk-go/v2 v2.15.2 +## explicit; go 1.18 +github.com/cloudevents/sdk-go/v2 +github.com/cloudevents/sdk-go/v2/binding +github.com/cloudevents/sdk-go/v2/binding/format +github.com/cloudevents/sdk-go/v2/binding/spec +github.com/cloudevents/sdk-go/v2/client +github.com/cloudevents/sdk-go/v2/context +github.com/cloudevents/sdk-go/v2/event +github.com/cloudevents/sdk-go/v2/event/datacodec +github.com/cloudevents/sdk-go/v2/event/datacodec/json +github.com/cloudevents/sdk-go/v2/event/datacodec/text +github.com/cloudevents/sdk-go/v2/event/datacodec/xml +github.com/cloudevents/sdk-go/v2/protocol +github.com/cloudevents/sdk-go/v2/protocol/http +github.com/cloudevents/sdk-go/v2/types # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew @@ -183,6 +202,9 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go +# github.com/klauspost/compress v1.17.2 +## explicit; go 1.18 +github.com/klauspost/compress/flate # github.com/leodido/go-urn v1.2.1 ## explicit; go 1.13 github.com/leodido/go-urn @@ -212,6 +234,18 @@ github.com/modern-go/reflect2 # github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg +# github.com/nats-io/nats.go v1.31.0 +## explicit; go 1.20 +github.com/nats-io/nats.go +github.com/nats-io/nats.go/encoders/builtin +github.com/nats-io/nats.go/internal/parser +github.com/nats-io/nats.go/util +# github.com/nats-io/nkeys v0.4.6 +## explicit; go 1.19 +github.com/nats-io/nkeys +# github.com/nats-io/nuid v1.0.1 +## explicit +github.com/nats-io/nuid # github.com/onsi/ginkgo/v2 v2.6.0 ## explicit; go 1.18 github.com/onsi/ginkgo/v2 @@ -331,14 +365,19 @@ go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore -# golang.org/x/crypto v0.16.0 +# golang.org/x/crypto v0.17.0 ## explicit; go 1.18 +golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519/internal/field +golang.org/x/crypto/ed25519 golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 +golang.org/x/crypto/nacl/box +golang.org/x/crypto/nacl/secretbox +golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/sha3 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
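
(Closing note: the modules.txt hunk records the x/crypto bump from v0.16.0 to v0.17.0 that carries the strict-KEX changes above, alongside the salsa20 and nacl packages newly vendored, presumably pulled in by the NATS dependencies. One earlier change is worth restating: the channel.go hunk stops sending an SSH_MSG_CHANNEL_WINDOW_ADJUST for every read. Consumed bytes accumulate in myConsumed and are flushed in one adjustment once enough of the window is outstanding, cutting per-read messages on chatty channels. A sketch of that decision in isolation; channelWindowSize is an assumed stand-in for the package constant, while the threshold expression is the one the hunk adds:)

	const channelWindowSize = 2 * 1024 * 1024 // assumed default; the real constant lives elsewhere in the package

	// shouldFlushWindow reports whether the accumulated consumption is
	// worth announcing, mirroring the condition added to
	// (*channel).adjustWindow.
	func shouldFlushWindow(myWindow, consumed, maxIncomingPayload uint32) (adj uint32, send bool) {
		if channelWindowSize-myWindow > 3*maxIncomingPayload || myWindow < channelWindowSize/2 {
			return consumed, true // re-credit myWindow by adj and send the message
		}
		return 0, false // keep batching
	}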